hadoop-common-commits mailing list archives

From lium...@apache.org
Subject hadoop git commit: HDFS-11122. TestDFSAdmin#testReportCommand fails due to timeout. Contributed by Yiqun Lin
Date Fri, 11 Nov 2016 20:33:34 GMT
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 bde95f90e -> 289f03e49


HDFS-11122. TestDFSAdmin#testReportCommand fails due to timeout. Contributed by Yiqun Lin

(cherry picked from commit aa6010ccca3045ce9f0bb819fb2cb7ff65e1822b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/289f03e4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/289f03e4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/289f03e4

Branch: refs/heads/branch-2
Commit: 289f03e49e911429a58c9952bc152b62355b5e1a
Parents: bde95f9
Author: Mingliang Liu <liuml07@apache.org>
Authored: Fri Nov 11 12:29:47 2016 -0800
Committer: Mingliang Liu <liuml07@apache.org>
Committed: Fri Nov 11 12:30:22 2016 -0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hdfs/tools/TestDFSAdmin.java  | 16 ++++++++++++++--
 1 file changed, 14 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/289f03e4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
index 1e861e5..8acf0c1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
@@ -31,6 +31,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.ReconfigurationUtil;
+import org.apache.hadoop.fs.ChecksumException;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSClient;
@@ -46,6 +47,7 @@ import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.ToolRunner;
@@ -69,6 +71,7 @@ import static org.hamcrest.CoreMatchers.not;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 import static org.hamcrest.CoreMatchers.containsString;
 import static org.mockito.Matchers.any;
 import static org.mockito.Mockito.mock;
@@ -472,7 +475,7 @@ public class TestDFSAdmin {
     return sb.toString();
   }
 
-  @Test(timeout = 30000)
+  @Test(timeout = 120000)
   public void testReportCommand() throws Exception {
     redirectStream();
 
@@ -530,6 +533,14 @@ public class TestDFSAdmin {
       assertEquals("Fail to corrupt all replicas for block " + block,
           replFactor, blockFilesCorrupted);
 
+      try {
+        IOUtils.copyBytes(fs.open(file), new IOUtils.NullOutputStream(),
+            conf, true);
+        fail("Should have failed to read the file with corrupted blocks.");
+      } catch (ChecksumException ignored) {
+        // expected exception reading corrupt blocks
+      }
+
       /*
        * Increase replication factor, this should invoke transfer request.
        * Receiving datanode fails on checksum and reports it to namenode
@@ -542,6 +553,7 @@ public class TestDFSAdmin {
         public Boolean get() {
           LocatedBlocks blocks = null;
           try {
+            miniCluster.triggerBlockReports();
             blocks = client.getNamenode().getBlockLocations(file.toString(), 0,
                 Long.MAX_VALUE);
           } catch (IOException e) {
@@ -549,7 +561,7 @@ public class TestDFSAdmin {
           }
           return blocks != null && blocks.get(0).isCorrupt();
         }
-      }, 100, 60000);
+      }, 1000, 60000);
 
       BlockManagerTestUtil.updateState(
           miniCluster.getNameNode().getNamesystem().getBlockManager());
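
For readers skimming the archive: the heart of the fix is the wait loop in the last hunk. Instead of polling every 100 ms and relying on the DataNodes' periodic block reports, the patched test polls once per second and explicitly triggers block reports on each check, after first reading the corrupted file so the client reports the bad replicas to the NameNode. A minimal standalone sketch of that wait idiom follows; it assumes a running MiniDFSCluster and DFSClient, and the class and method names are illustrative rather than part of the patch.

    import java.io.IOException;

    import com.google.common.base.Supplier;

    import org.apache.hadoop.hdfs.DFSClient;
    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
    import org.apache.hadoop.test.GenericTestUtils;

    // Illustrative helper (not part of the patch): block until the NameNode
    // marks the file's first block as corrupt.
    final class CorruptBlockWaiter {
      static void waitForFirstBlockCorrupt(final MiniDFSCluster cluster,
          final DFSClient client, final String path) throws Exception {
        GenericTestUtils.waitFor(new Supplier<Boolean>() {
          @Override
          public Boolean get() {
            try {
              // Ask every DataNode to send a block report now, rather than
              // waiting for its regular reporting interval; this is the call
              // the patch adds inside the supplier.
              cluster.triggerBlockReports();
              LocatedBlocks blocks = client.getNamenode()
                  .getBlockLocations(path, 0, Long.MAX_VALUE);
              return blocks != null && blocks.get(0).isCorrupt();
            } catch (IOException e) {
              return false;
            }
          }
        }, 1000, 60000); // poll every 1 s, give up after 60 s
      }
    }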

