hadoop-common-commits mailing list archives

From cnaur...@apache.org
Subject svn commit: r1495297 [34/46] - in /hadoop/common/branches/branch-1-win: ./ bin/ conf/ ivy/ lib/jdiff/ src/c++/libhdfs/docs/ src/c++/libhdfs/tests/conf/ src/contrib/capacity-scheduler/ivy/ src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred...
Date Fri, 21 Jun 2013 06:37:39 GMT
Added: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestPersistBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestPersistBlocks.java?rev=1495297&view=auto
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestPersistBlocks.java (added)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestPersistBlocks.java Fri Jun 21 06:37:27 2013
@@ -0,0 +1,323 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs;
+
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.IOException;
+import java.util.Random;
+
+import org.apache.commons.logging.impl.Log4JLogger;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.namenode.FSImage;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.log4j.Level;
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * A JUnit test for checking if restarting DFS preserves the
+ * blocks that are part of an unclosed file.
+ */
+public class TestPersistBlocks {
+  static {
+    ((Log4JLogger)FSImage.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+  }
+
+  private static final String HADOOP_1_0_MULTIBLOCK_TGZ =
+      "hadoop-1.0-multiblock-file.tgz";
+  
+  private static final int BLOCK_SIZE = 4096;
+  private static final int NUM_BLOCKS = 5;
+
+  private static final String FILE_NAME = "/data";
+  private static final Path FILE_PATH = new Path(FILE_NAME);
+  
+  static final byte[] DATA_BEFORE_RESTART = new byte[BLOCK_SIZE * NUM_BLOCKS];
+  static final byte[] DATA_AFTER_RESTART = new byte[BLOCK_SIZE * NUM_BLOCKS];
+  
+
+  static {
+    Random rand = new Random();
+    rand.nextBytes(DATA_BEFORE_RESTART);
+    rand.nextBytes(DATA_AFTER_RESTART);
+  }
+  
+  /** Check that DFS remains in proper condition after a restart
+   *  when flush() is used. */
+
+  @Test  
+  public void TestRestartDfsWithFlush() throws Exception {
+    testRestartDfs(true);
+  }
+  
+  
+  /** Check that DFS remains in proper condition after a restart
+   *  when sync() is used. */
+  @Test
+  public void TestRestartDfsWithSync() throws Exception {
+    testRestartDfs(false);
+  }
+  
+  /**
+   * check if DFS remains in proper condition after a restart
+   * @param useFlush if true, flush() is used instead of sync() (i.e. hflush)
+   */
+  void testRestartDfs(boolean useFlush) throws Exception {
+    final Configuration conf = new Configuration();
+    // Turn off persistent IPC, so that the DFSClient can survive NN restart
+    conf.setInt("ipc.client.connection.maxidletime", 0);
+    conf.setBoolean(DFSConfigKeys.DFS_PERSIST_BLOCKS_KEY, true);
+    MiniDFSCluster cluster = null;
+
+    long len = 0;
+    FSDataOutputStream stream;
+    try {
+      // small safemode extension to make the test run faster.
+      conf.set("dfs.safemode.extension", "1");
+      cluster = new MiniDFSCluster(conf, 4, true, null);
+      cluster.waitActive();
+      FileSystem fs = cluster.getFileSystem();
+      // Creating a file with 4096 blockSize to write multiple blocks
+      stream = fs.create(FILE_PATH, true, BLOCK_SIZE, (short) 1, BLOCK_SIZE);
+      stream.write(DATA_BEFORE_RESTART);
+      if (useFlush)
+        stream.flush();
+      else
+        stream.sync();
+      
+      // Wait for at least a few blocks to get through
+      while (len <= BLOCK_SIZE) {
+        FileStatus status = fs.getFileStatus(FILE_PATH);
+        len = status.getLen();
+        Thread.sleep(100);
+      }
+      
+      // explicitly do NOT close the file.
+      cluster.restartNameNode();
+      
+      // Check that the file has no less bytes than before the restart
+      // This would mean that blocks were successfully persisted to the log
+      FileStatus status = fs.getFileStatus(FILE_PATH);
+      assertTrue("Length too short: " + status.getLen(),
+          status.getLen() >= len);
+      
+      // And keep writing (ensures that leases are also persisted correctly)
+      stream.write(DATA_AFTER_RESTART);
+      stream.close();
+      
+      // Verify that the data showed up, both from before and after the restart.
+      FSDataInputStream readStream = fs.open(FILE_PATH);
+      try {
+        byte[] verifyBuf = new byte[DATA_BEFORE_RESTART.length];
+        IOUtils.readFully(readStream, verifyBuf, 0, verifyBuf.length);
+        assertArrayEquals(DATA_BEFORE_RESTART, verifyBuf);
+        
+        IOUtils.readFully(readStream, verifyBuf, 0, verifyBuf.length);
+        assertArrayEquals(DATA_AFTER_RESTART, verifyBuf);
+      } finally {
+        IOUtils.closeStream(readStream);
+      }
+    } finally {
+      if (cluster != null) { cluster.shutdown(); }
+    }
+  }
+  
+  @Test
+  public void testRestartWithPartialBlockHflushed() throws IOException {
+    final Configuration conf = new Configuration();
+    // Turn off persistent IPC, so that the DFSClient can survive NN restart
+    conf.setInt("ipc.client.connection.maxidletime", 0);
+    conf.setBoolean(DFSConfigKeys.DFS_PERSIST_BLOCKS_KEY, true);
+    MiniDFSCluster cluster = null;
+
+    FSDataOutputStream stream;
+    try {
+      // small safemode extension to make the test run faster.
+      conf.set("dfs.safemode.extension", "1");
+      cluster = new MiniDFSCluster(conf, 4, true, null);
+      cluster.waitActive();
+      FileSystem fs = cluster.getFileSystem();
+      NameNode.getAddress(conf).getPort();
+      // Creating a file with 4096 blockSize to write multiple blocks
+      stream = fs.create(FILE_PATH, true, BLOCK_SIZE, (short) 1, BLOCK_SIZE);
+      stream.write(DATA_BEFORE_RESTART);
+      stream.write((byte)1);
+      stream.sync();
+      
+      // explicitly do NOT close the file before restarting the NN.
+      cluster.restartNameNode();
+      
+      // this will fail if the final block of the file is prematurely COMPLETEd
+      stream.write((byte)2);
+      stream.sync(); // hflush was named sync in the 0.20-append branch
+      stream.close();
+      
+      assertEquals(DATA_BEFORE_RESTART.length + 2,
+          fs.getFileStatus(FILE_PATH).getLen());
+      
+      FSDataInputStream readStream = fs.open(FILE_PATH);
+      try {
+        byte[] verifyBuf = new byte[DATA_BEFORE_RESTART.length + 2];
+        IOUtils.readFully(readStream, verifyBuf, 0, verifyBuf.length);
+        byte[] expectedBuf = new byte[DATA_BEFORE_RESTART.length + 2];
+        System.arraycopy(DATA_BEFORE_RESTART, 0, expectedBuf, 0,
+            DATA_BEFORE_RESTART.length);
+        System.arraycopy(new byte[]{1, 2}, 0, expectedBuf,
+            DATA_BEFORE_RESTART.length, 2);
+        assertArrayEquals(expectedBuf, verifyBuf);
+      } finally {
+        IOUtils.closeStream(readStream);
+      }
+    } finally {
+      if (cluster != null) { cluster.shutdown(); }
+    }
+  }
+  
+  @Test
+  public void testRestartWithAppend() throws IOException {
+    final Configuration conf = new Configuration();
+    conf.set("dfs.safemode.extension", "1");
+    conf.setBoolean("dfs.support.broken.append", true);
+    conf.setBoolean(DFSConfigKeys.DFS_PERSIST_BLOCKS_KEY, true);
+    MiniDFSCluster cluster = null;
+
+    FSDataOutputStream stream;
+    try {
+      // small safemode extension to make the test run faster.
+      conf.set("dfs.safemode.extension", "1");
+      cluster = new MiniDFSCluster(conf, 4, true, null);
+      cluster.waitActive();
+      FileSystem fs = cluster.getFileSystem();
+      NameNode.getAddress(conf).getPort();
+      // Creating a file with 4096 blockSize to write multiple blocks
+      stream = fs.create(FILE_PATH, true, BLOCK_SIZE, (short) 1, BLOCK_SIZE);
+      stream.write(DATA_BEFORE_RESTART, 0, DATA_BEFORE_RESTART.length / 2);
+      stream.close();
+      stream = fs.append(FILE_PATH, BLOCK_SIZE);
+      stream.write(DATA_BEFORE_RESTART, DATA_BEFORE_RESTART.length / 2,
+          DATA_BEFORE_RESTART.length / 2);
+      stream.close();
+      
+      assertEquals(DATA_BEFORE_RESTART.length,
+          fs.getFileStatus(FILE_PATH).getLen());
+      
+      cluster.restartNameNode();
+      
+      assertEquals(DATA_BEFORE_RESTART.length,
+          fs.getFileStatus(FILE_PATH).getLen());
+      
+      FSDataInputStream readStream = fs.open(FILE_PATH);
+      try {
+        byte[] verifyBuf = new byte[DATA_BEFORE_RESTART.length];
+        IOUtils.readFully(readStream, verifyBuf, 0, verifyBuf.length);
+        assertArrayEquals(DATA_BEFORE_RESTART, verifyBuf);
+      } finally {
+        IOUtils.closeStream(readStream);
+      }
+    } finally {
+      if (cluster != null) { cluster.shutdown(); }
+    }
+  }
+  
+
+  static void assertFileExists(File f) {
+    Assert.assertTrue("File " + f + " should exist", f.exists());
+  }
+  
+  static String readFile(FileSystem fs, Path fileName) throws IOException {
+    ByteArrayOutputStream os = new ByteArrayOutputStream();
+    IOUtils.copyBytes(fs.open(fileName), os, 1024, true);
+    return os.toString();
+  }
+  
+  /**
+   * Earlier versions of HDFS didn't persist block allocation to the edit log.
+   * This makes sure that we can still load an edit log when the OP_CLOSE
+   * is the opcode which adds all of the blocks. This is a regression
+   * test for HDFS-2773.
+   * This test uses a tarred pseudo-distributed cluster from Hadoop 1.0
+   * which has a multi-block file. This is similar to the tests in
+   * {@link TestDFSUpgradeFromImage} but none of those images include
+   * a multi-block file.
+   */
+  @Test
+  public void testEarlierVersionEditLog() throws Exception {
+    final Configuration conf = new Configuration();
+        
+    String tarFile = System.getProperty("test.cache.data", "build/test/cache")
+      + "/" + HADOOP_1_0_MULTIBLOCK_TGZ;
+    String testDir = System.getProperty("test.build.data", "build/test/data");
+    File dfsDir = new File(testDir, "image-1.0");
+    if (dfsDir.exists() && !FileUtil.fullyDelete(dfsDir)) {
+      throw new IOException("Could not delete dfs directory '" + dfsDir + "'");
+    }
+    FileUtil.unTar(new File(tarFile), new File(testDir));
+
+    File nameDir = new File(dfsDir, "name");
+    assertFileExists(nameDir);
+    File dataDir = new File(dfsDir, "data");
+    assertFileExists(dataDir);
+    
+    conf.set("dfs.name.dir", nameDir.getAbsolutePath());
+    conf.set("dfs.data.dir", dataDir.getAbsolutePath());
+    
+    conf.setBoolean("dfs.support.broken.append", true);
+    // small safemode extension to make the test run faster.
+    conf.set("dfs.safemode.extension", "1");
+    MiniDFSCluster cluster = new  MiniDFSCluster(0, conf, 1, false, false,
+        StartupOption.UPGRADE,
+        null);
+    cluster.waitActive();
+
+    try {
+      FileSystem fs = cluster.getFileSystem();
+      Path testPath = new Path("/user/todd/4blocks");
+      // Read it without caring about the actual data within - we just need
+      // to make sure that the block states and locations are OK.
+      readFile(fs, testPath);
+      
+      // Ensure that we can append to it - if the blocks were in some funny
+      // state we'd get some kind of issue here. 
+      FSDataOutputStream stm = fs.append(testPath);
+      try {
+        stm.write(1);
+      } finally {
+        IOUtils.closeStream(stm);
+      }
+    } finally {
+      cluster.shutdown();
+    }
+  }
+}
+

Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestQuota.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestQuota.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestQuota.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestQuota.java Fri Jun 21 06:37:27 2013
@@ -62,7 +62,7 @@ public class TestQuota extends TestCase 
     // Space quotas
     final int DEFAULT_BLOCK_SIZE = 512;
     conf.setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
-    conf.setBoolean("dfs.support.append", true);
+    conf.setBoolean("dfs.support.broken.append", true);
     final MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
     final FileSystem fs = cluster.getFileSystem();
     assertTrue("Not a HDFS: "+fs.getUri(),
@@ -509,7 +509,7 @@ public class TestQuota extends TestCase 
     // set a smaller block size so that we can test with smaller 
     // diskspace quotas
     conf.set("dfs.block.size", "512");
-    conf.setBoolean("dfs.support.append", true);
+    conf.setBoolean("dfs.support.broken.append", true);
     final MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
     final FileSystem fs = cluster.getFileSystem();
     assertTrue("Not a HDFS: "+fs.getUri(),

Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestRenameWhileOpen.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestRenameWhileOpen.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestRenameWhileOpen.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestRenameWhileOpen.java Fri Jun 21 06:37:27 2013
@@ -48,7 +48,6 @@ public class TestRenameWhileOpen extends
     conf.setInt("heartbeat.recheck.interval", 1000);
     conf.setInt("dfs.heartbeat.interval", 1);
     conf.setInt("dfs.safemode.threshold.pct", 1);
-    conf.setBoolean("dfs.support.append", true);
 
     // create cluster
     System.out.println("Test 1*****************************");
@@ -132,7 +131,6 @@ public class TestRenameWhileOpen extends
     conf.setInt("heartbeat.recheck.interval", 1000);
     conf.setInt("dfs.heartbeat.interval", 1);
     conf.setInt("dfs.safemode.threshold.pct", 1);
-    conf.setBoolean("dfs.support.append", true);
     System.out.println("Test 2************************************");
 
     // create cluster
@@ -205,7 +203,6 @@ public class TestRenameWhileOpen extends
     conf.setInt("heartbeat.recheck.interval", 1000);
     conf.setInt("dfs.heartbeat.interval", 1);
     conf.setInt("dfs.safemode.threshold.pct", 1);
-    conf.setBoolean("dfs.support.append", true);
     System.out.println("Test 3************************************");
 
     // create cluster
@@ -268,7 +265,6 @@ public class TestRenameWhileOpen extends
     conf.setInt("heartbeat.recheck.interval", 1000);
     conf.setInt("dfs.heartbeat.interval", 1);
     conf.setInt("dfs.safemode.threshold.pct", 1);
-    conf.setBoolean("dfs.support.append", true);
     System.out.println("Test 4************************************");
 
     // create cluster

Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestSafeMode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestSafeMode.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestSafeMode.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestSafeMode.java Fri Jun 21 06:37:27 2013
@@ -18,26 +18,32 @@
 
 package org.apache.hadoop.hdfs;
 
+import static org.junit.Assert.*;
+
 import java.io.IOException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
-
-import junit.framework.TestCase;
+import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
+import org.apache.hadoop.ipc.RemoteException;
+import org.junit.Assert;
+import org.junit.Test;
 
 /**
  * This test makes sure that if SafeMode is manually entered, NameNode does not
  * come out of safe mode even after the startup safemode conditions are met.
  */
-public class TestSafeMode extends TestCase {
+public class TestSafeMode {
   
   static Log LOG = LogFactory.getLog(TestSafeMode.class);
   
+  @Test
   public void testManualSafeMode() throws IOException {
     MiniDFSCluster cluster = null;
     FileSystem fs = null;
@@ -92,4 +98,128 @@ public class TestSafeMode extends TestCa
       if(cluster!= null) cluster.shutdown();
     }
   }
+
+  /**
+   * Verify that the NameNode stays in safemode when dfs.safemode.datanode.min
+   * is set to a number greater than the number of live datanodes.
+   */
+  @Test
+  public void testDatanodeThreshold() throws IOException {
+    MiniDFSCluster cluster = null;
+    DistributedFileSystem fs = null;
+    try {
+      Configuration conf = new Configuration();
+      conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, 0);
+      conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_MIN_DATANODES_KEY, 1);
+
+      // bring up a cluster with no datanodes
+      cluster = new MiniDFSCluster(conf, 0, true, null);
+      cluster.waitActive();
+      fs = (DistributedFileSystem)cluster.getFileSystem();
+
+      assertTrue("No datanode started, but we require one - safemode expected",
+                 fs.setSafeMode(SafeModeAction.SAFEMODE_GET));
+
+      String tipMsg = cluster.getNameNode().getNamesystem().getSafeModeTip();
+      assertTrue("Safemode tip message looks right",
+                 tipMsg.contains("The number of live datanodes 0 needs an " +
+                                 "additional 1 live"));
+
+      // Start a datanode
+      cluster.startDataNodes(conf, 1, true, null, null);
+
+      // Wait long enough for safemode check to refire
+      try {
+        Thread.sleep(1000);
+      } catch (InterruptedException ignored) {}
+
+      // We now should be out of safe mode.
+      assertFalse(
+        "Out of safe mode after starting datanode.",
+        fs.setSafeMode(SafeModeAction.SAFEMODE_GET));
+    } finally {
+      if (fs != null) fs.close();
+      if (cluster != null) cluster.shutdown();
+    }
+  }
+
+  @Test
+  public void testSafeModeWhenZeroBlockLocations() throws IOException {
+    MiniDFSCluster cluster = null;
+    FileSystem fs = null;
+    try {
+      Configuration conf = new Configuration();
+      // small safemode extension to make the test run faster.
+      conf.set("dfs.safemode.extension", "1");
+      cluster = new MiniDFSCluster(conf, 1, true, null);
+      cluster.waitActive();
+
+      fs = cluster.getFileSystem();
+      Path file1 = new Path("/tmp/testManualSafeMode/file1");
+      Path file2 = new Path("/tmp/testManualSafeMode/file2");
+
+      LOG.info("Created file1 and file2.");
+
+      // create two files with one block each.
+      DFSTestUtil.createFile(fs, file1, 1000, (short) 1, 0);
+      DFSTestUtil.createFile(fs, file2, 2000, (short) 1, 0);
+      checkGetBlockLocationsWorks(fs, file1);
+
+      NameNode namenode = cluster.getNameNode();
+      namenode.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+      Assert.assertTrue("should still be in SafeMode", namenode.isInSafeMode());
+      // getBlock locations should still work since block locations exists
+      checkGetBlockLocationsWorks(fs, file1);
+      namenode.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+      assertFalse("should not be in SafeMode", namenode.isInSafeMode());
+      Assert.assertFalse("should not be in SafeMode", namenode.isInSafeMode());
+
+      // Now 2nd part of the tests where there aren't block locations
+      cluster.shutdownDataNodes();
+      cluster.shutdownNameNode();
+
+      // now bring up just the NameNode.
+      cluster.restartNameNode();
+      cluster.waitActive();
+
+      LOG.info("Restarted cluster with just the NameNode");
+
+      namenode = cluster.getNameNode();
+
+      Assert.assertTrue("No datanode is started. Should be in SafeMode",
+          namenode.isInSafeMode());
+      FileStatus stat = fs.getFileStatus(file1);
+      try {
+        fs.getFileBlockLocations(stat, 0, 1000);
+        Assert.assertTrue("Should have got safemode exception", false);
+      } catch (SafeModeException e) {
+        // as expected
+      } catch (RemoteException re) {
+        if (!re.getClassName().equals(SafeModeException.class.getName()))
+          Assert.assertTrue("Should have got safemode exception", false);
+      }
+
+      namenode.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+      Assert.assertFalse("Should not be in safemode", namenode.isInSafeMode());
+      checkGetBlockLocationsWorks(fs, file1);
+
+    } finally {
+      if (fs != null)
+        fs.close();
+      if (cluster != null)
+        cluster.shutdown();
+    }
+  }
+
+  void checkGetBlockLocationsWorks(FileSystem fs, Path fileName)
+      throws IOException {
+    FileStatus stat = fs.getFileStatus(fileName);
+    try {
+      fs.getFileBlockLocations(stat, 0, 1000);
+    } catch (SafeModeException e) {
+      Assert.assertTrue("Should have not got safemode exception", false);
+    } catch (RemoteException re) {
+      Assert.assertTrue("Should have not got safemode exception", false);
+    }
+  }
 }

Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java Fri Jun 21 06:37:27 2013
@@ -208,7 +208,7 @@ public class TestShortCircuitLocalRead {
             @Override
             public ClientDatanodeProtocol run() throws Exception {
               return DFSClient.createClientDatanodeProtocolProxy(
-                  dnInfo, conf, 60000);
+                  dnInfo, conf, 60000, false);
             }
           });
       
@@ -226,7 +226,7 @@ public class TestShortCircuitLocalRead {
             @Override
             public ClientDatanodeProtocol run() throws Exception {
               return DFSClient.createClientDatanodeProtocolProxy(
-                  dnInfo, conf, 60000);
+                  dnInfo, conf, 60000, false);
             }
           });
       try {

Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestSyncingWriterInterrupted.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestSyncingWriterInterrupted.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestSyncingWriterInterrupted.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestSyncingWriterInterrupted.java Fri Jun 21 06:37:27 2013
@@ -1,3 +1,18 @@
+/**
+ *  Licensed under the Apache License, Version 2.0 (the "License");
+ *  you may not use this file except in compliance with the License.
+ *  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+
 package org.apache.hadoop.hdfs;
 
 import static org.junit.Assert.*;
@@ -24,7 +39,7 @@ public class TestSyncingWriterInterrupte
   @Before
   public void setUp() throws Exception {
     conf = new Configuration();
-    conf.setBoolean("dfs.support.append", true);
+    conf.setBoolean("dfs.support.broken.append", true);
     conf.setInt("dfs.client.block.recovery.retries", 1);
   }
   

Added: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestWriteConfigurationToDFS.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestWriteConfigurationToDFS.java?rev=1495297&view=auto
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestWriteConfigurationToDFS.java (added)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestWriteConfigurationToDFS.java Fri Jun 21 06:37:27 2013
@@ -0,0 +1,55 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import java.io.OutputStream;
+import org.junit.Test;
+
+/**
+ * Regression test for HDFS-1542, a deadlock between the main thread
+ * and the DFSOutputStream.DataStreamer thread caused because
+ * Configuration.writeXML holds a lock on itself while writing to DFS.
+ */
+public class TestWriteConfigurationToDFS {
+  @Test(timeout=60000)
+  public void testWriteConf() throws Exception {
+    Configuration conf = new Configuration();
+    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096);
+    System.out.println("Setting conf in: " + System.identityHashCode(conf));
+    
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null, null);
+    cluster.waitActive();
+    try {
+      FileSystem fs = cluster.getFileSystem();
+      Path filePath = new Path("/testWriteConf.xml");
+      OutputStream os = fs.create(filePath);
+      StringBuilder longString = new StringBuilder();
+      for (int i = 0; i < 100000; i++) {
+        longString.append("hello");
+      } // 500KB
+      conf.set("foobar", longString.toString());
+      conf.writeXml(os);
+      os.close();
+    } finally {
+      cluster.shutdown();
+    }
+  }
+}
\ No newline at end of file

Added: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/hadoop-1.0-multiblock-file.tgz
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/hadoop-1.0-multiblock-file.tgz?rev=1495297&view=auto
==============================================================================
Binary file - no diff available.

Propchange: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/hadoop-1.0-multiblock-file.tgz
------------------------------------------------------------------------------
    svn:mime-type = application/octet-stream

Added: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java?rev=1495297&view=auto
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java (added)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java Fri Jun 21 06:37:27 2013
@@ -0,0 +1,86 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol;
+
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+import java.util.EnumSet;
+
+import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
+import org.junit.Test;
+
+/**
+ * Test for {@link LayoutVersion}
+ */
+public class TestLayoutVersion {
+  
+  /**
+   * Tests to make sure a given layout version supports all the
+   * features from the ancestor
+   */
+  @Test
+  public void testFeaturesFromAncestorSupported() {
+    for (Feature f : Feature.values()) {
+      validateFeatureList(f);
+    }
+  }
+  
+  /**
+   * Test to make sure 0.20.203 supports delegation token
+   */
+  @Test
+  public void testRelease203() {
+    assertTrue(LayoutVersion.supports(Feature.DELEGATION_TOKEN, 
+        Feature.RESERVED_REL20_203.lv));
+  }
+  
+  /**
+   * Test to make sure 0.20.204 supports delegation token
+   */
+  @Test
+  public void testRelease204() {
+    assertTrue(LayoutVersion.supports(Feature.DELEGATION_TOKEN, 
+        Feature.RESERVED_REL20_204.lv));
+  }
+  
+  /**
+   * Test to make sure release 1.2.0 supports CONCAT
+   */
+  @Test
+  public void testRelease1_2_0() {
+    assertTrue(LayoutVersion.supports(Feature.CONCAT, 
+        Feature.RESERVED_REL1_2_0.lv));
+  }
+  
+  /**
+   * Given feature {@code f}, ensures the layout version of that feature
+   * supports all the features supported by its ancestor.
+   */
+  private void validateFeatureList(Feature f) {
+    int lv = f.lv;
+    int ancestorLV = f.ancestorLV;
+    EnumSet<Feature> ancestorSet = LayoutVersion.map.get(ancestorLV);
+    assertNotNull(ancestorSet);
+    for (Feature  feature : ancestorSet) {
+      assertTrue("LV " + lv + " does nto support " + feature
+          + " supported by the ancestor LV " + f.ancestorLV,
+          LayoutVersion.supports(feature, lv));
+    }
+  }
+}

Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java Fri Jun 21 06:37:27 2013
@@ -106,7 +106,7 @@ public class TestDelegationTokenForProxy
   public void setUp() throws Exception {
     config = new Configuration();
     config.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
-    config.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
+    config.setBoolean("dfs.support.broken.append", true);
     config.setLong(
         DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, 10000);
     config.setLong(
@@ -206,7 +206,7 @@ public class TestDelegationTokenForProxy
       final URL url = WebHdfsTestUtil.toUrl(webhdfs, op,  f, new DoAsParam(PROXY_USER));
       WebHdfsTestUtil.LOG.info("url=" + url);
       HttpURLConnection conn = (HttpURLConnection) url.openConnection();
-      conn = WebHdfsTestUtil.twoStepWrite(conn, op);
+      conn = WebHdfsTestUtil.twoStepWrite(webhdfs, op, conn);
       final FSDataOutputStream out = WebHdfsTestUtil.write(webhdfs, op, conn, 4096);
       out.write("Hello, webhdfs user!".getBytes());
       out.close();
@@ -221,7 +221,7 @@ public class TestDelegationTokenForProxy
       final PostOpParam.Op op = PostOpParam.Op.APPEND;
       final URL url = WebHdfsTestUtil.toUrl(webhdfs, op,  f, new DoAsParam(PROXY_USER));
       HttpURLConnection conn = (HttpURLConnection) url.openConnection();
-      conn = WebHdfsTestUtil.twoStepWrite(conn, op);
+      conn = WebHdfsTestUtil.twoStepWrite(webhdfs, op, conn);
       final FSDataOutputStream out = WebHdfsTestUtil.write(webhdfs, op, conn, 4096);
       out.write("\nHello again!".getBytes());
       out.close();

Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java Fri Jun 21 06:37:27 2013
@@ -45,9 +45,10 @@ import org.apache.hadoop.hdfs.protocol.F
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.Util;
-import org.apache.hadoop.hdfs.server.datanode.BlockTransferThrottler;
+import org.apache.hadoop.hdfs.util.DataTransferThrottler;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.net.NetUtils;
+
 /**
  * This class tests if block replacement request to data nodes work correctly.
  */
@@ -63,7 +64,7 @@ public class TestBlockReplacement extend
     final long TOTAL_BYTES =6*bandwidthPerSec; 
     long bytesToSend = TOTAL_BYTES; 
     long start = Util.now();
-    BlockTransferThrottler throttler = new BlockTransferThrottler(bandwidthPerSec);
+    DataTransferThrottler throttler = new DataTransferThrottler(bandwidthPerSec);
     long totalBytes = 0L;
     long bytesSent = 1024*512L; // 0.5MB
     throttler.throttle(bytesSent);

Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java Fri Jun 21 06:37:27 2013
@@ -64,6 +64,11 @@ public class TestDataNodeMXBean {
       // get attribute "getVolumeInfo"
       String volumeInfo = (String)mbs.getAttribute(mxbeanName, "VolumeInfo");
       Assert.assertEquals(datanode.getVolumeInfo(),volumeInfo);
+      // Ensure the MXBean's XceiverCount is the same as the DataNode's
+      // live value.
+      int xceiverCount = (Integer)mbs.getAttribute(mxbeanName,
+          "XceiverCount");
+      Assert.assertEquals(datanode.getXceiverCount(), xceiverCount);
     } finally {
       if (cluster != null) {cluster.shutdown();}
     }

Added: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVersionCheck.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVersionCheck.java?rev=1495297&view=auto
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVersionCheck.java (added)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVersionCheck.java Fri Jun 21 06:37:27 2013
@@ -0,0 +1,157 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.datanode;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
+
+import org.junit.Test;
+import static org.junit.Assert.*;
+
+/**
+ * Test the version check the DN performs when connecting to the NN
+ */
+public class TestDataNodeVersionCheck {
+
+  /**
+   * Test the default DN version checking
+   */
+  @Test
+  public void testDefaultVersionCheck() throws IOException {
+    MiniDFSCluster cluster = null;
+    try {
+      Configuration conf = new Configuration();
+      cluster = new MiniDFSCluster(conf, 1, true, null);
+
+      DataNode dn = cluster.getDataNodes().get(0);
+    
+      final NamespaceInfo currInfo = new NamespaceInfo(0, 0, 0);
+      assertTrue(dn.isPermittedVersion(currInfo));
+
+      // Different revisions are not permitted
+      NamespaceInfo infoDiffRev = new NamespaceInfo(0, 0, 0) {
+                @Override public String getRevision() { return "bogus"; }
+      };      
+      assertFalse("Different revision is not permitted",
+          dn.isPermittedVersion(infoDiffRev));
+
+      // Different versions are not permitted
+      NamespaceInfo infoDiffVersion = new NamespaceInfo(0, 0, 0) {
+        @Override public String getVersion() { return "bogus"; }
+        @Override public String getRevision() { return "bogus"; }
+      };
+      assertFalse("Different version is not permitted",
+          dn.isPermittedVersion(infoDiffVersion));
+
+      // A bogus version (matching revision but not version)
+      NamespaceInfo bogusVersion = new NamespaceInfo(0, 0, 0) {
+        @Override public String getVersion() { return "bogus"; }
+      };
+      try {
+        dn.isPermittedVersion(bogusVersion);
+        fail("Matched revision with mismatched version");
+      } catch (AssertionError ae) {
+        // Expected
+      }
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+  
+  /**
+   * Test the "relaxed" DN version checking
+   */
+  @Test
+  public void testRelaxedVersionCheck() throws IOException {
+    MiniDFSCluster cluster = null;
+    try {
+      Configuration conf = new Configuration();
+      conf.setBoolean(
+          CommonConfigurationKeys.HADOOP_RELAXED_VERSION_CHECK_KEY, true);
+      cluster = new MiniDFSCluster(conf, 1, true, null);
+      
+      DataNode dn = cluster.getDataNodes().get(0);
+    
+      final NamespaceInfo currInfo = new NamespaceInfo(0, 0, 0);
+      assertTrue(dn.isPermittedVersion(currInfo));
+
+      // Different revisions are permitted
+      NamespaceInfo infoDiffRev = new NamespaceInfo(0, 0, 0) {
+        @Override public String getRevision() { return "bogus"; }
+      };      
+      assertTrue("Different revisions should be permitted",
+          dn.isPermittedVersion(infoDiffRev));
+
+      // Different versions are not permitted
+      NamespaceInfo infoDiffVersion = new NamespaceInfo(0, 0, 0) {
+        @Override public String getVersion() { return "bogus"; }
+        @Override public String getRevision() { return "bogus"; }
+      };
+      assertFalse("Different version is not permitted",
+          dn.isPermittedVersion(infoDiffVersion));
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
+  /**
+   * Test no DN version checking
+   */
+  @Test
+  public void testNoVersionCheck() throws IOException {
+    MiniDFSCluster cluster = null;
+    try {
+      Configuration conf = new Configuration();
+      conf.setBoolean(
+          CommonConfigurationKeys.HADOOP_SKIP_VERSION_CHECK_KEY, true);
+      cluster = new MiniDFSCluster(conf, 1, true, null);
+
+      DataNode dn = cluster.getDataNodes().get(0);
+
+      final NamespaceInfo currInfo = new NamespaceInfo(0, 0, 0);
+      assertTrue(dn.isPermittedVersion(currInfo));
+
+      // Different revisions are permitted
+      NamespaceInfo infoDiffRev = new NamespaceInfo(0, 0, 0) {
+        @Override public String getRevision() { return "bogus"; }
+      };
+      assertTrue("Different revisions should be permitted",
+          dn.isPermittedVersion(infoDiffRev));
+
+      // Different versions are permitted
+      NamespaceInfo infoDiffVersion = new NamespaceInfo(0, 0, 0) {
+        @Override public String getVersion() { return "bogus"; }
+        @Override public String getRevision() { return "bogus"; }
+      };
+      assertTrue("Different versions should be permitted",
+          dn.isPermittedVersion(infoDiffVersion));
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+}
\ No newline at end of file

Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java Fri Jun 21 06:37:27 2013
@@ -96,8 +96,8 @@ public class TestDiskError extends TestC
       DFSTestUtil.waitReplication(fs, fileName, (short)1);
 
       // get the block belonged to the created file
-      LocatedBlocks blocks = cluster.getNameNode().namesystem.getBlockLocations(
-          fileName.toString(), 0, (long)fileLen);
+      LocatedBlocks blocks = cluster.getNameNode().getNamesystem()
+          .getBlockLocations(fileName.toString(), 0, (long) fileLen);
       assertEquals(blocks.locatedBlockCount(), 1);
       LocatedBlock block = blocks.get(0);
       

Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java Fri Jun 21 06:37:27 2013
@@ -36,6 +36,7 @@ import org.apache.hadoop.net.NetUtils;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.*;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
@@ -120,14 +121,34 @@ public class TestInterDatanodeProtocol e
     return blocks.get(blocks.size() - 1);
   }
 
+  /** Test block metadata access via a DN */
+  public void testBlockMetaDataInfo() throws Exception {
+    checkBlockMetaDataInfo(false);
+  }
+
+  /** The same as above, but use hostnames for DN<->DN communication */
+  public void testBlockMetaDataInfoWithHostname() throws Exception {
+    checkBlockMetaDataInfo(true);
+  }
+
   /**
    * The following test first creates a file.
    * It verifies the block information from a datanode.
-   * Then, it updates the block with new information and verifies again. 
+   * Then, it updates the block with new information and verifies again.
+   * @param useDnHostname if DNs should access DNs by hostname (vs IP)
    */
-  public void testBlockMetaDataInfo() throws Exception {
+  private void checkBlockMetaDataInfo(boolean useDnHostname) throws Exception {    
     MiniDFSCluster cluster = null;
 
+    conf.setBoolean(DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME, useDnHostname);
+    if (useDnHostname) {
+      // Since the mini cluster only listens on the loopback we have to
+      // ensure the hostname used to access DNs maps to the loopback. We
+      // do this by telling the DN to advertise localhost as its hostname
+      // instead of the default hostname.
+      conf.set("slave.host.name", "localhost");
+    }
+
     try {
       cluster = new MiniDFSCluster(conf, 3, true, null);
       cluster.waitActive();
@@ -148,7 +169,7 @@ public class TestInterDatanodeProtocol e
       DataNode datanode = cluster.getDataNode(datanodeinfo[0].getIpcPort());
       assertTrue(datanode != null);
       InterDatanodeProtocol idp = DataNode.createInterDataNodeProtocolProxy(
-          datanodeinfo[0], conf, datanode.socketTimeout);
+          datanodeinfo[0], conf, datanode.socketTimeout, useDnHostname);
       
       //stop block scanner, so we could compare lastScanTime
       datanode.blockScannerThread.interrupt();
@@ -184,7 +205,7 @@ public class TestInterDatanodeProtocol e
 
     try {
       proxy = DataNode.createInterDataNodeProtocolProxy(
-          dInfo, conf, 500);
+          dInfo, conf, 500, false);
       fail ("Expected SocketTimeoutException exception, but did not get.");
     } catch (SocketTimeoutException e) {
       DataNode.LOG.info("Got expected Exception: SocketTimeoutException");

Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java Fri Jun 21 06:37:27 2013
@@ -1049,9 +1049,9 @@ public class NNThroughputBenchmark {
       // start data-nodes; create a bunch of files; generate block reports.
       blockReportObject.generateInputs(ignore);
       // stop replication monitor
-      nameNode.namesystem.replthread.interrupt();
+      nameNode.getNamesystem().replthread.interrupt();
       try {
-        nameNode.namesystem.replthread.join();
+        nameNode.getNamesystem().replthread.join();
       } catch(InterruptedException ei) {
         return;
       }
@@ -1063,7 +1063,7 @@ public class NNThroughputBenchmark {
       // decommission data-nodes
       decommissionNodes();
       // set node replication limit
-      nameNode.namesystem.setNodeReplicationLimit(nodeReplicationLimit);
+      nameNode.getNamesystem().setNodeReplicationLimit(nodeReplicationLimit);
     }
 
     private void decommissionNodes() throws IOException {
@@ -1094,7 +1094,7 @@ public class NNThroughputBenchmark {
       assert daemonId < numThreads : "Wrong daemonId.";
       long start = System.currentTimeMillis();
       // compute data-node work
-      int work = nameNode.namesystem.computeDatanodeWork();
+      int work = nameNode.getNamesystem().computeDatanodeWork();
       long end = System.currentTimeMillis();
       numPendingBlocks += work;
       if(work == 0)

Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java Fri Jun 21 06:37:27 2013
@@ -17,9 +17,20 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 import java.io.IOException;
+
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 
 public abstract class NameNodeAdapter {
+  /**
+   * Get block locations within the specified range.
+   */
+  public static LocatedBlocks getBlockLocations(NameNode namenode,
+      String src, long offset, long length) throws IOException {
+    return namenode.getNamesystem().getBlockLocations(
+        src, offset, length, false, true, true);
+  }
+
   public static boolean checkFileProgress(FSNamesystem fsn, String path, boolean checkall) throws IOException {
     INodeFile f = fsn.dir.getFileINode(path);
     return fsn.checkFileProgress(f, checkall);

Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestBBWBlockReport.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestBBWBlockReport.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestBBWBlockReport.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestBBWBlockReport.java Fri Jun 21 06:37:27 2013
@@ -48,10 +48,9 @@ public class TestBBWBlockReport {
 
   @Test(timeout = 60000)
   // timeout is mainly for safe mode
-  public void testDNShouldSendBBWReportIfAppendOn() throws Exception {
+  public void testDNShouldSendBBWReport() throws Exception {
     FileSystem fileSystem = null;
     FSDataOutputStream outStream = null;
-    conf.setBoolean("dfs.support.append", true);
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
     cluster.waitActive();
     try {
@@ -72,35 +71,6 @@ public class TestBBWBlockReport {
     }
   }
 
-  @Test
-  public void testDNShouldNotSendBBWReportIfAppendOff() throws Exception {
-    FileSystem fileSystem = null;
-    FSDataOutputStream outStream = null;
-    // disable the append support
-    conf.setBoolean("dfs.support.append", false);
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
-    cluster.waitActive();
-    try {
-      fileSystem = cluster.getFileSystem();
-      // Keep open stream
-      outStream = writeFileAndSync(fileSystem, src, fileContent);
-      cluster.restartNameNode(false);
-      Thread.sleep(2000);
-      assertEquals(
-          "Able to read the synced block content after NameNode restart (without append support",
-          0, getFileContentFromDFS(fileSystem).length());
-    } finally {
-      // NN will not come out of safe mode. So exited the safemode forcibly to
-      // clean the resources.
-      cluster.getNameNode().setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
-      if (null != fileSystem)
-        fileSystem.close();
-      if (null != outStream)
-        outStream.close();
-      cluster.shutdown();
-    }
-  }
-
   private String getFileContentFromDFS(FileSystem fs) throws IOException {
     ByteArrayOutputStream bio = new ByteArrayOutputStream();
     IOUtils.copyBytes(fs.open(src), bio, conf, true);

Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestBlockTokenWithDFS.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestBlockTokenWithDFS.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestBlockTokenWithDFS.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestBlockTokenWithDFS.java Fri Jun 21 06:37:27 2013
@@ -170,7 +170,7 @@ public class TestBlockTokenWithDFS exten
     conf.setInt("dfs.heartbeat.interval", 1);
     conf.setInt("dfs.replication", numDataNodes);
     conf.setInt("ipc.client.connect.max.retries", 0);
-    conf.setBoolean("dfs.support.append", true);
+    conf.setBoolean("dfs.support.broken.append", true);
     return conf;
   }
 

Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestComputeInvalidateWork.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestComputeInvalidateWork.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestComputeInvalidateWork.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestComputeInvalidateWork.java Fri Jun 21 06:37:27 2013
@@ -1,3 +1,18 @@
+/**
+ *  Licensed under the Apache License, Version 2.0 (the "License");
+ *  you may not use this file except in compliance with the License.
+ *  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+
 package org.apache.hadoop.hdfs.server.namenode;
 
 import org.apache.hadoop.conf.Configuration;

Added: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestCorruptFilesJsp.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestCorruptFilesJsp.java?rev=1495297&view=auto
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestCorruptFilesJsp.java (added)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestCorruptFilesJsp.java Fri Jun 21 06:37:27 2013
@@ -0,0 +1,118 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import static org.junit.Assert.assertTrue;
+
+import java.net.URL;
+import java.util.Collection;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.ChecksumException;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.TestDatanodeBlockScanner;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.CorruptFileBlockInfo;
+import org.junit.Test;
+
+/** A JUnit test for corrupt_files.jsp */
+public class TestCorruptFilesJsp  {
+
+  @Test
+  public void testCorruptFilesJsp() throws Exception {
+    MiniDFSCluster cluster = null;
+    try {
+      final int FILE_SIZE = 512;
+      Path[] filepaths = { new Path("/audiobook"), new Path("/audio/audio1"),
+          new Path("/audio/audio2"), new Path("/audio/audio") };
+
+      Configuration conf = new Configuration();
+      // DataNode scans directories
+      conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
+      // DataNode sends block reports
+      conf.setInt(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 3 * 1000);
+      cluster = new MiniDFSCluster(conf, 1, true, null);
+      cluster.waitActive();
+
+      FileSystem fs = cluster.getFileSystem();
+
+      // create files
+      for (Path filepath : filepaths) {
+        DFSTestUtil.createFile(fs, filepath, FILE_SIZE, (short) 1, 0L);
+        DFSTestUtil.waitReplication(fs, filepath, (short) 1);
+      }
+
+      // verify there are no corrupt files
+      Collection<CorruptFileBlockInfo> badFiles = cluster.getNameNode()
+          .getNamesystem().listCorruptFileBlocks();
+      assertTrue("There are " + badFiles.size()
+          + " corrupt files, but expecting none", badFiles.size() == 0);
+
+      String nnUrl = cluster.getNameNode().getHttpAddress().getHostName() + ":"
+          + cluster.getNameNode().getHttpAddress().getPort(); 
+      URL url = new URL("http://" + nnUrl + "/corrupt_files.jsp");
+      String corruptFilesPage = DFSTestUtil.urlGet(url);
+      assertTrue("Corrupt files page is not showing a healthy filesystem",
+          corruptFilesPage.contains("No missing blocks found at the moment."));
+
+      // Now corrupt all the files except for the last one
+      for (int idx = 0; idx < filepaths.length - 1; idx++) {
+        String blockName = DFSTestUtil.getFirstBlock(fs, filepaths[idx])
+            .getBlockName();
+        TestDatanodeBlockScanner.corruptReplica(blockName, 0);
+
+        // read the file so that the corrupt block is reported to NN
+        FSDataInputStream in = fs.open(filepaths[idx]);
+        try {
+          in.readFully(new byte[FILE_SIZE]);
+        } catch (ChecksumException ignored) { // checksum error is expected.
+        }
+        in.close();
+      }
+
+      // verify that all corrupt files were reported to the NN
+      badFiles = cluster.getNameNode().getNamesystem().listCorruptFileBlocks();
+      assertTrue("Expecting 3 corrupt files, but got " + badFiles.size(),
+          badFiles.size() == 3);
+
+      corruptFilesPage = DFSTestUtil.urlGet(url);
+      assertTrue("'/audiobook' should be corrupt", corruptFilesPage
+          .contains("/audiobook"));
+      assertTrue("'/audio/audio1' should be corrupt", corruptFilesPage
+          .contains("/audio/audio1"));
+      assertTrue("'/audio/audio2' should be corrupt", corruptFilesPage
+          .contains("/audio/audio2"));
+      assertTrue("Summary message shall report 3 corrupt files",
+          corruptFilesPage.contains("At least 3 corrupt file(s)"));
+
+      // clean up
+      for (Path filepath : filepaths) {
+        fs.delete(filepath, false);
+      }
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
+}
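
The added TestCorruptFilesJsp drives corrupt_files.jsp end to end: it corrupts replicas, forces the client to read them so the checksum errors are reported to the NameNode, and then checks both the namesystem listing and the JSP output. A minimal sketch of the listing call it relies on is shown below; the helper class and method are hypothetical, and a running MiniDFSCluster (as in the test) is assumed.

import java.util.Collection;

import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.CorruptFileBlockInfo;

public class CorruptFileListingExample {
  // Prints whatever corrupt file blocks the NameNode currently knows about,
  // using the same listCorruptFileBlocks() call as the test above.
  public static void printCorruptFiles(MiniDFSCluster cluster) throws Exception {
    Collection<CorruptFileBlockInfo> badFiles =
        cluster.getNameNode().getNamesystem().listCorruptFileBlocks();
    System.out.println("NameNode reports " + badFiles.size() + " corrupt file block(s)");
    for (CorruptFileBlockInfo info : badFiles) {
      System.out.println("  " + info); // relies on the info object's toString()
    }
  }
}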

Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestDFSConcurrentFileOperations.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestDFSConcurrentFileOperations.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestDFSConcurrentFileOperations.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestDFSConcurrentFileOperations.java Fri Jun 21 06:37:27 2013
@@ -74,7 +74,7 @@ public class TestDFSConcurrentFileOperat
     Configuration conf = new Configuration();
     
     conf.setLong("dfs.block.size", blockSize);
-    conf.setBoolean("dfs.support.append", true);
+    conf.setBoolean("dfs.support.broken.append", true);
     
     init(conf);
     

Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java Fri Jun 21 06:37:27 2013
@@ -31,6 +31,7 @@ import org.apache.hadoop.hdfs.server.com
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeDirType;
 import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeFile;
+import org.apache.hadoop.io.BytesWritable;
 
 /**
  * This class tests the creation and validation of a checkpoint.
@@ -172,7 +173,8 @@ public class TestEditLog extends TestCas
             fsimage.dirIterator(NameNodeDirType.EDITS); it.hasNext();) {
       File editFile = FSImage.getImageFile(it.next(), NameNodeFile.EDITS);
       System.out.println("Verifying file: " + editFile);
-      int numEdits = FSEditLog.loadFSEdits(new EditLogFileInputStream(editFile), -1);
+      int numEdits = FSEditLog.loadFSEdits(
+          new EditLogFileInputStream(editFile), -1, null);
       int numLeases = FSNamesystem.getFSNamesystem().leaseManager.countLease();
       System.out.println("Number of outstanding leases " + numLeases);
       assertEquals(0, numLeases);

Added: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestEditLogLoading.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestEditLogLoading.java?rev=1495297&view=auto
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestEditLogLoading.java (added)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestEditLogLoading.java Fri Jun 21 06:37:27 2013
@@ -0,0 +1,83 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.junit.Test;
+
+public class TestEditLogLoading {
+
+  private static final int NUM_DATA_NODES = 0;
+
+  @Test
+  public void testDisplayRecentEditLogOpCodes() throws IOException {
+    // start a cluster
+    Configuration conf = new Configuration();
+    conf.set("dfs.name.dir", new File(MiniDFSCluster.getBaseDir(), "name").getPath());
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_EDITS_TOLERATION_LENGTH_KEY, -1);
+
+
+    MiniDFSCluster cluster = null;
+    FileSystem fileSys = null;
+    cluster = new MiniDFSCluster(0, conf, NUM_DATA_NODES, true, false, null, null);
+    cluster.waitActive();
+    fileSys = cluster.getFileSystem();
+    final FSNamesystem namesystem = cluster.getNameNode().getNamesystem();
+
+    FSImage fsimage = namesystem.getFSImage();
+    final FSEditLog editLog = fsimage.getEditLog();
+    for (int i = 0; i < 20; i++) {
+      fileSys.mkdirs(new Path("/tmp/tmp" + i));
+    }
+    File editFile = editLog.getFsEditName();
+    System.out.println("edit log file: " + editFile);
+    editLog.close();
+    cluster.shutdown();
+
+    // Corrupt the edits file.
+    long fileLen = editFile.length();
+    RandomAccessFile rwf = new RandomAccessFile(editFile, "rw");
+    rwf.seek(fileLen - 40);
+    for (int i = 0; i < 20; i++) {
+      rwf.write((byte) 2); // FSEditLog.DELETE
+    }
+    rwf.close();
+
+    String expectedErrorMessage = "^Failed to parse edit log.*";
+    expectedErrorMessage += ", Recent opcode offsets=\\[(\\d+\\s*){4}\\]$";
+    try {
+      cluster = new MiniDFSCluster(0, conf, NUM_DATA_NODES, false, false, null, null);
+      cluster.waitActive();
+      fail("should not be able to start");
+    } catch (IOException e) {
+      assertTrue("error message contains opcodes message",
+          e.getMessage().matches(expectedErrorMessage));
+    }
+  }
+}
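
The added TestEditLogLoading expects the NameNode startup failure to carry the offsets of the most recent opcodes, and checks the message with the regex built above. The stand-alone snippet below shows that pattern matching a sample message; the message text is invented for illustration, only the pattern is taken from the test.

public class OpcodeOffsetMessageCheck {
  public static void main(String[] args) {
    String pattern = "^Failed to parse edit log.*"
        + ", Recent opcode offsets=\\[(\\d+\\s*){4}\\]$";
    // Hypothetical message of the shape the test expects; the real text
    // is produced by the edit log loader when it hits a corrupt record.
    String sample = "Failed to parse edit log (/name/current/edits)"
        + ", Recent opcode offsets=[1234 1250 1266 1282]";
    System.out.println(sample.matches(pattern)); // prints: true
  }
}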

Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestFsck.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestFsck.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestFsck.java Fri Jun 21 06:37:27 2013
@@ -47,6 +47,7 @@ import org.apache.hadoop.hdfs.protocol.L
 import org.apache.hadoop.hdfs.tools.DFSck;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.ToolRunner;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
@@ -76,6 +77,9 @@ public class TestFsck extends TestCase {
     PrintStream newOut = new PrintStream(bStream, true);
     System.setOut(newOut);
     ((Log4JLogger)FSPermissionChecker.LOG).getLogger().setLevel(Level.ALL);
+    NameNode.LOG.debug("runFsck(expectedErrCode=" + expectedErrCode +
+        ", checkErrorCode=" + checkErrorCode + ", path='" +
+        StringUtils.join(",", path) + "')");
     int errCode = ToolRunner.run(new DFSck(conf), path);
     if (checkErrorCode)
       assertEquals(expectedErrCode, errCode);
@@ -228,7 +232,8 @@ public class TestFsck extends TestCase {
     }
   }
 
-  public void testFsckMove() throws Exception {
+  public void testFsckMoveAndDelete() throws Exception {
+    final int NUM_MOVE_TRIES = 3;
     DFSTestUtil util = new DFSTestUtil("TestFsck", 5, 3, 8*1024);
     MiniDFSCluster cluster = null;
     FileSystem fs = null;
@@ -248,6 +253,7 @@ public class TestFsck extends TestCase {
       String[] fileNames = util.getFileNames(topDir);
       DFSClient dfsClient = new DFSClient(new InetSocketAddress("localhost",
                                           cluster.getNameNodePort()), conf);
+      String corruptFileName = fileNames[0];
       String block = dfsClient.namenode.
                       getBlockLocations(fileNames[0], 0, Long.MAX_VALUE).
                       get(0).getBlock().getBlockName();
@@ -270,8 +276,23 @@ public class TestFsck extends TestCase {
         outStr = runFsck(conf, 1, false, "/");
       } 
       
-      // Fix the filesystem by moving corrupted files to lost+found
-      outStr = runFsck(conf, 1, true, "/", "-move");
+      // After a fsck -move, the corrupted file should still exist.
+      for (int retry = 0; retry < NUM_MOVE_TRIES; retry++) {
+        outStr = runFsck(conf, 1, true, "/", "-move" );
+        assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
+        String[] newFileNames = util.getFileNames(topDir);
+        boolean found = false;
+        for (String f : newFileNames) {
+          if (f.equals(corruptFileName)) {
+            found = true;
+            break;
+          }
+        }
+        assertTrue(found);
+      }
+
+      // Fix the filesystem by deleting corrupted files
+      outStr = runFsck(conf, 1, true, "/", "-delete");
       assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
       
       // Check to make sure we have healthy filesystem
@@ -431,7 +452,7 @@ public class TestFsck extends TestCase {
       DFSTestUtil.waitReplication(fs, filePath, (short)1);
       
       // intentionally corrupt NN data structure
-      INodeFile node = (INodeFile)cluster.getNameNode().namesystem.dir.rootDir.getNode(fileName);
+      INodeFile node = (INodeFile)cluster.getNameNode().getNamesystem().dir.rootDir.getNode(fileName);
       assertEquals(node.blocks.length, 1);
       node.blocks[0].setNumBytes(-1L);  // set the block length to be negative
       

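The reworked hunk above renames testFsckMove to testFsckMoveAndDelete: it now asserts that "-move" leaves the corrupt file in place (retried NUM_MOVE_TRIES times) and that only "-delete" clears it. A minimal sketch of driving fsck the same way the test's runFsck helper does is shown below; the wrapper class is hypothetical and conf is assumed to point at a running cluster.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.tools.DFSck;
import org.apache.hadoop.util.ToolRunner;

public class FsckDriverExample {
  // Equivalent to "hadoop fsck / -delete": returns the tool's exit code.
  public static int runFsckDelete(Configuration conf) throws Exception {
    return ToolRunner.run(new DFSck(conf), new String[] { "/", "-delete" });
  }
}
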
Added: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java?rev=1495297&view=auto
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java (added)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java Fri Jun 21 06:37:27 2013
@@ -0,0 +1,350 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.tools.DFSAdmin;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestHDFSConcat {
+  public static final Log LOG = LogFactory.getLog(TestHDFSConcat.class);
+
+  private static final short REPL_FACTOR = 2;
+  
+  private MiniDFSCluster cluster;
+  private NameNode nn;
+  private DistributedFileSystem dfs;
+
+  private static long blockSize = 512;
+
+  
+  private static Configuration conf;
+
+  static {
+    conf = new Configuration();
+    conf.setLong("dfs.block.size", blockSize);
+  }
+  
+  @Before
+  public void startUpCluster() throws IOException {
+    cluster = new MiniDFSCluster(conf, REPL_FACTOR, true, null);
+    assertNotNull("Failed Cluster Creation", cluster);
+    cluster.waitClusterUp();
+    dfs = (DistributedFileSystem) cluster.getFileSystem();
+    assertNotNull("Failed to get FileSystem", dfs);
+    nn = cluster.getNameNode();
+    assertNotNull("Failed to get NameNode", nn);
+  }
+
+  @After
+  public void shutDownCluster() throws IOException {
+    if(dfs != null) {
+      dfs.close();
+    }
+    if(cluster != null) {
+      cluster.shutdownDataNodes();
+      cluster.shutdown();
+    }
+  } 
+
+  /**
+   * Concatenates 10 files into one
+   * Verifies the final size, deletion of the file, number of blocks
+   * @throws IOException
+   * @throws InterruptedException 
+   */
+  @Test
+  public void testConcat() throws IOException, InterruptedException {
+    final int numFiles = 10;
+    long fileLen = blockSize*3;
+    HdfsFileStatus fStatus;
+    FSDataInputStream stm;
+    
+    String trg = "/trg";
+    Path trgPath = new Path(trg);
+    DFSTestUtil.createFile(dfs, trgPath, fileLen, REPL_FACTOR, 1);
+    fStatus  = nn.getFileInfo(trg);
+    long trgLen = fStatus.getLen();
+    long trgBlocks = nn.getBlockLocations(trg, 0, trgLen).locatedBlockCount();
+       
+    Path [] files = new Path[numFiles];
+    byte [] [] bytes = new byte [numFiles][(int)fileLen];
+    LocatedBlocks [] lblocks = new LocatedBlocks[numFiles];
+    long [] lens = new long [numFiles];
+    
+    
+    int i = 0;
+    for(i=0; i<files.length; i++) {
+      files[i] = new Path("/file"+i);
+      Path path = files[i];
+      System.out.println("Creating file " + path);
+      DFSTestUtil.createFile(dfs, path, fileLen, REPL_FACTOR, 1);
+    
+      fStatus = nn.getFileInfo(path.toUri().getPath());
+      lens[i] = fStatus.getLen();
+      assertEquals(trgLen, lens[i]); // file of the same length.
+      
+      lblocks[i] = nn.getBlockLocations(path.toUri().getPath(), 0, lens[i]);
+      
+      //read the file
+      stm = dfs.open(path);
+      stm.readFully(0, bytes[i]);
+      //bytes[i][10] = 10;
+      stm.close();
+    }
+    
+    // check permissions - try the operation with the "wrong" user
+    final UserGroupInformation user1 = UserGroupInformation.createUserForTesting(
+        "theDoctor", new String[] { "tardis" });
+    DistributedFileSystem hdfs = 
+        (DistributedFileSystem)DFSTestUtil.getFileSystemAs(user1, conf);
+    try {
+      hdfs.concat(trgPath, files);
+      fail("Permission exception expected");
+    } catch (IOException ie) {
+      System.out.println("Got expected exception for permissions:"
+          + ie.getLocalizedMessage());
+      // expected
+    }
+    
+    // check count update
+    ContentSummary cBefore = dfs.getContentSummary(trgPath.getParent());
+    
+    // now concatenate
+    dfs.concat(trgPath, files);
+    
+    // verify  count
+    ContentSummary cAfter = dfs.getContentSummary(trgPath.getParent());
+    assertEquals(cBefore.getFileCount(), cAfter.getFileCount()+files.length);
+    
+    // verify other stuff
+    long totalLen = trgLen;
+    long totalBlocks = trgBlocks;
+    for(i=0; i<files.length; i++) {
+      totalLen += lens[i];
+      totalBlocks += lblocks[i].locatedBlockCount();
+    }
+    System.out.println("total len=" + totalLen + "; totalBlocks=" + totalBlocks);
+    
+    
+    fStatus = nn.getFileInfo(trg);
+    trgLen  = fStatus.getLen(); // new length
+    
+    // read the resulting file
+    stm = dfs.open(trgPath);
+    byte[] byteFileConcat = new byte[(int)trgLen];
+    stm.readFully(0, byteFileConcat);
+    stm.close();
+    
+    trgBlocks = nn.getBlockLocations(trg, 0, trgLen).locatedBlockCount();
+    
+    //verifications
+    // 1. number of blocks
+    assertEquals(trgBlocks, totalBlocks); 
+        
+    // 2. file lengths
+    assertEquals(trgLen, totalLen);
+    
+    // 3. removal of the src file
+    for(Path p: files) {
+      fStatus = nn.getFileInfo(p.toUri().getPath());
+      assertNull("File " + p + " still exists", fStatus); // file shouldn't exist
+      // try to create a file with the same name
+      DFSTestUtil.createFile(dfs, p, fileLen, REPL_FACTOR, 1); 
+    }
+  
+    // 4. content
+    checkFileContent(byteFileConcat, bytes);
+    
+    // add a small file (less than a block)
+    Path smallFile = new Path("/sfile");
+    int sFileLen = 10;
+    DFSTestUtil.createFile(dfs, smallFile, sFileLen, REPL_FACTOR, 1);
+    dfs.concat(trgPath, new Path [] {smallFile});
+    
+    fStatus = nn.getFileInfo(trg);
+    trgLen  = fStatus.getLen(); // new length
+    
+    // check number of blocks
+    trgBlocks = nn.getBlockLocations(trg, 0, trgLen).locatedBlockCount();
+    assertEquals(trgBlocks, totalBlocks+1);
+    
+    // and length
+    assertEquals(trgLen, totalLen+sFileLen);
+    
+  }
+
+  // compare content
+  private void checkFileContent(byte[] concat, byte[][] bytes ) {
+    int idx=0;
+    boolean mismatch = false;
+    
+    for(byte [] bb: bytes) {
+      for(byte b: bb) {
+        if(b != concat[idx++]) {
+          mismatch=true;
+          break;
+        }
+      }
+      if(mismatch)
+        break;
+    }
+    assertFalse("File content of concatenated file is different", mismatch);
+  }
+
+
+  // test case when final block is not of a full length
+  @Test
+  public void testConcatNotCompleteBlock() throws IOException {
+    long trgFileLen = blockSize*3;
+    long srcFileLen = blockSize*3+20; // block at the end - not full
+
+    
+    // create first file
+    String name1="/trg", name2="/src";
+    Path filePath1 = new Path(name1);
+    DFSTestUtil.createFile(dfs, filePath1, trgFileLen, REPL_FACTOR, 1);
+    
+    HdfsFileStatus fStatus = cluster.getNameNode().getFileInfo(name1);
+    long fileLen = fStatus.getLen();
+    assertEquals(fileLen, trgFileLen);
+    
+    //read the file
+    FSDataInputStream stm = dfs.open(filePath1);
+    byte[] byteFile1 = new byte[(int)trgFileLen];
+    stm.readFully(0, byteFile1);
+    stm.close();
+    
+    LocatedBlocks lb1 = cluster.getNameNode().getBlockLocations(name1, 0, trgFileLen);
+    
+    Path filePath2 = new Path(name2);
+    DFSTestUtil.createFile(dfs, filePath2, srcFileLen, REPL_FACTOR, 1);
+    fStatus = cluster.getNameNode().getFileInfo(name2);
+    fileLen = fStatus.getLen();
+    assertEquals(srcFileLen, fileLen);
+    
+    // read the file
+    stm = dfs.open(filePath2);
+    byte[] byteFile2 = new byte[(int)srcFileLen];
+    stm.readFully(0, byteFile2);
+    stm.close();
+    
+    LocatedBlocks lb2 = cluster.getNameNode().getBlockLocations(name2, 0, srcFileLen);
+    
+    
+    System.out.println("trg len="+trgFileLen+"; src len="+srcFileLen);
+    
+    // move the blocks
+    dfs.concat(filePath1, new Path [] {filePath2});
+    
+    long totalLen = trgFileLen + srcFileLen;
+    fStatus = cluster.getNameNode().getFileInfo(name1);
+    fileLen = fStatus.getLen();
+    
+    // read the resulting file
+    stm = dfs.open(filePath1);
+    byte[] byteFileConcat = new byte[(int)fileLen];
+    stm.readFully(0, byteFileConcat);
+    stm.close();
+    
+    LocatedBlocks lbConcat = cluster.getNameNode().getBlockLocations(name1, 0, fileLen);
+    
+    //verifications
+    // 1. number of blocks
+    assertEquals(lbConcat.locatedBlockCount(), 
+        lb1.locatedBlockCount() + lb2.locatedBlockCount());
+    
+    // 2. file lengths
+    System.out.println("file1 len="+fileLen+"; total len="+totalLen);
+    assertEquals(fileLen, totalLen);
+    
+    // 3. removal of the src file
+    fStatus = cluster.getNameNode().getFileInfo(name2);
+    assertNull("File "+name2+ "still exists", fStatus); // file shouldn't exist
+  
+    // 4. content
+    checkFileContent(byteFileConcat, new byte [] [] {byteFile1, byteFile2});
+  }
+  
+  /**
+   * test illegal args cases
+   */
+  @Test
+  public void testIllegalArg() throws IOException {
+    long fileLen = blockSize*3;
+    
+    Path parentDir  = new Path ("/parentTrg");
+    assertTrue(dfs.mkdirs(parentDir));
+    Path trg = new Path(parentDir, "trg");
+    DFSTestUtil.createFile(dfs, trg, fileLen, REPL_FACTOR, 1);
+
+    // must be in the same dir
+    {
+      // create first file
+      Path dir1 = new Path ("/dir1");
+      assertTrue(dfs.mkdirs(dir1));
+      Path src = new Path(dir1, "src");
+      DFSTestUtil.createFile(dfs, src, fileLen, REPL_FACTOR, 1);
+      
+      try {
+        dfs.concat(trg, new Path [] {src});
+        fail("didn't fail for src and trg in different directories");
+      } catch (Exception e) {
+        // expected
+      }
+    }
+    // non existing file
+    try {
+      dfs.concat(trg, new Path [] {new Path("test1/a")}); // non existing file
+      fail("didn't fail with invalid arguments");
+    } catch (Exception e) {
+      //expected
+    }
+    // empty arg list
+    try {
+      dfs.concat(trg, new Path [] {}); // empty array
+      fail("didn't fail with invalid arguments");
+    } catch (Exception e) {
+      // expected
+    }
+ 
+  }
+}
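
The added TestHDFSConcat exercises DistributedFileSystem.concat(): the sources' blocks are moved onto the end of the target file and the source paths disappear afterwards, with testIllegalArg checking preconditions such as sources living in the same directory as the target. A minimal usage sketch follows; the paths are hypothetical and the files are assumed to already exist in one directory.

import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class ConcatUsageExample {
  // Folds the two source files into the target; after the call the
  // source paths no longer exist (as verified by the test above).
  public static void concatParts(DistributedFileSystem dfs) throws IOException {
    Path target = new Path("/data/part-merged");
    Path[] sources = { new Path("/data/part-0"), new Path("/data/part-1") };
    dfs.concat(target, sources);
  }
}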

Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestHeartbeatHandling.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestHeartbeatHandling.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestHeartbeatHandling.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestHeartbeatHandling.java Fri Jun 21 06:37:27 2013
@@ -1,3 +1,18 @@
+/**
+ *  Licensed under the Apache License, Version 2.0 (the "License");
+ *  you may not use this file except in compliance with the License.
+ *  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+
 package org.apache.hadoop.hdfs.server.namenode;
 
 import java.util.ArrayList;


