From: t...@apache.org
Subject: svn commit: r1383030 [2/2] - in /hadoop/common/branches/HDFS-3077/hadoop-hdfs-project: hadoop-hdfs-httpfs/ hadoop-hdfs/ hadoop-hdfs/src/ hadoop-hdfs/src/contrib/bkjournal/ hadoop-hdfs/src/contrib/bkjournal/dev-support/ hadoop-hdfs/src/contrib/bkjournal...
Date: Mon, 10 Sep 2012 18:45:53 GMT
Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMultipleNNDataBlockScanner.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMultipleNNDataBlockScanner.java?rev=1383030&r1=1383029&r2=1383030&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMultipleNNDataBlockScanner.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMultipleNNDataBlockScanner.java Mon Sep 10 18:45:45 2012
@@ -20,8 +20,11 @@ package org.apache.hadoop.hdfs.server.da
 
 import java.io.IOException;
 
+import junit.framework.Assert;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -31,7 +34,13 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+import org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceScanner;
+import static org.apache.hadoop.hdfs.server.datanode.DataBlockScanner.SLEEP_PERIOD_MS;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
 import org.junit.Test;
+import org.junit.Ignore;
+import static org.junit.Assert.fail;
 
 
 public class TestMultipleNNDataBlockScanner {
@@ -166,4 +175,75 @@ public class TestMultipleNNDataBlockScan
       cluster.shutdown();
     }
   }
+  
+  @Test
+  public void test2NNBlockRescanInterval() throws IOException {
+    ((Log4JLogger)BlockPoolSliceScanner.LOG).getLogger().setLevel(Level.ALL);
+    Configuration conf = new HdfsConfiguration();
+    cluster = new MiniDFSCluster.Builder(conf)
+        .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(3))
+        .build();
+
+    try {
+      FileSystem fs = cluster.getFileSystem(1);
+      Path file2 = new Path("/test/testBlockScanInterval");
+      DFSTestUtil.createFile(fs, file2, 30, (short) 1, 0);
+
+      fs = cluster.getFileSystem(0);
+      Path file1 = new Path("/test/testBlockScanInterval");
+      DFSTestUtil.createFile(fs, file1, 30, (short) 1, 0);
+      for (int i = 0; i < 8; i++) {
+        LOG.info("Verifying that the blockscanner scans exactly once");
+        waitAndScanBlocks(1, 1);
+      }
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
+  /**
+   * HDFS-3828: DN rescans blocks too frequently.
+   * 
+   * @throws IOException
+   */
+  @Test
+  public void testBlockRescanInterval() throws IOException {
+    ((Log4JLogger)BlockPoolSliceScanner.LOG).getLogger().setLevel(Level.ALL);
+    Configuration conf = new HdfsConfiguration();
+    cluster = new MiniDFSCluster.Builder(conf).build();
+
+    try {
+      FileSystem fs = cluster.getFileSystem();
+      Path file1 = new Path("/test/testBlockScanInterval");
+      DFSTestUtil.createFile(fs, file1, 30, (short) 1, 0);
+      for (int i = 0; i < 4; i++) {
+        LOG.info("Verifying that the blockscanner scans exactly once");
+        waitAndScanBlocks(1, 1);
+      }
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
+  void waitAndScanBlocks(long scansLastRun, long scansTotal)
+      throws IOException {
+    // DataBlockScanner runs every 5 seconds, so poll on that interval
+    // (up to n attempts) for the expected scan counts.
+    int n = 5;
+    String bpid = cluster.getNamesystem(0).getBlockPoolId();
+    DataNode dn = cluster.getDataNodes().get(0);
+    long blocksScanned, total;
+    do {
+      try {
+        Thread.sleep(SLEEP_PERIOD_MS);
+      } catch (InterruptedException e) {
+        fail("Interrupted: " + e);
+      }
+      blocksScanned = dn.blockScanner.getBlocksScannedInLastRun(bpid);
+      total = dn.blockScanner.getTotalScans(bpid);
+      LOG.info("bpid = " + bpid + " blocksScanned = " + blocksScanned + " total=" + total);
+    } while (n-- > 0 && (blocksScanned != scansLastRun || scansTotal != total));
+    Assert.assertEquals(scansTotal, total);
+    Assert.assertEquals(scansLastRun, blocksScanned);
+  }
 }
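
The waitAndScanBlocks helper above follows a common poll-until-stable test pattern: sleep one scanner period, re-read the counters, and stop after a bounded number of attempts so a hung scanner cannot wedge the test forever. Below is a minimal, self-contained sketch of that pattern; the PollUntilStable class, the LongSupplier-based counter, and the 100 ms period are illustrative assumptions (the real test polls on DataBlockScanner.SLEEP_PERIOD_MS).

    import java.util.function.LongSupplier;

    /** Poll-until-stable: re-read a counter until it matches or attempts run out. */
    public class PollUntilStable {

      // Illustrative period; waitAndScanBlocks uses DataBlockScanner.SLEEP_PERIOD_MS.
      private static final long PERIOD_MS = 100;

      static boolean waitForCount(LongSupplier counter, long expected, int attempts)
          throws InterruptedException {
        long observed = counter.getAsLong();
        // Same shape as the do/while in waitAndScanBlocks: keep sleeping and
        // re-reading while the value is wrong and attempts remain.
        while (attempts-- > 0 && observed != expected) {
          Thread.sleep(PERIOD_MS);
          observed = counter.getAsLong();
        }
        return observed == expected;
      }

      public static void main(String[] args) throws InterruptedException {
        // Toy counter that advances by one per read; it reaches 3 on the third poll.
        long[] reads = {0};
        System.out.println("reached expected count: "
            + waitForCount(() -> ++reads[0], 3, 5));
      }
    }

The bounded attempt count is the important design choice: the asserts after the loop still fire if the counters never stabilize, so a regression shows up as a test failure rather than a timeout-killed run.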

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java?rev=1383030&r1=1383029&r2=1383030&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java Mon Sep 10 18:45:45 2012
@@ -17,17 +17,27 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.io.File;
 import java.io.IOException;
+import java.net.SocketTimeoutException;
+import java.net.URL;
 import java.util.Collections;
 import java.util.List;
 
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServlet;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.http.HttpServerFunctionalTest;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.Test;
@@ -100,4 +110,45 @@ public class TestTransferFsImage {
       cluster.shutdown();      
     }
   }
+
+  /**
+   * Test that an image transfer fails with a read timeout when the server stalls.
+   */
+  @Test(timeout = 5000)
+  public void testImageTransferTimeout() throws Exception {
+    HttpServer testServer = HttpServerFunctionalTest.createServer("hdfs");
+    try {
+      testServer.addServlet("GetImage", "/getimage", TestGetImageServlet.class);
+      testServer.start();
+      URL serverURL = HttpServerFunctionalTest.getServerURL(testServer);
+      TransferFsImage.timeout = 2000;
+      try {
+        TransferFsImage.getFileClient(serverURL.getAuthority(), "txid=1", null,
+            null, false);
+        fail("TransferImage Should fail with timeout");
+      } catch (SocketTimeoutException e) {
+        assertEquals("Read should timeout", "Read timed out", e.getMessage());
+      }
+    } finally {
+      if (testServer != null) {
+        testServer.stop();
+      }
+    }
+  }
+
+  public static class TestGetImageServlet extends HttpServlet {
+    private static final long serialVersionUID = 1L;
+
+    @Override
+    protected void doGet(HttpServletRequest req, HttpServletResponse resp)
+        throws ServletException, IOException {
+      synchronized (this) {
+        try {
+          wait(5000);
+        } catch (InterruptedException e) {
+          // Ignore
+        }
+      }
+    }
+  }
 }
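
The timeout test works by pointing TransferFsImage.getFileClient at a servlet that stalls for 5 seconds while the client-side read timeout is 2 seconds, so the read must fail first with the JDK's "Read timed out" message. Below is a minimal sketch of the underlying mechanism, assuming a plain HttpURLConnection client against a hypothetical stalling endpoint; the URL and the 2000 ms values are illustrative, not taken from TransferFsImage.

    import java.io.InputStream;
    import java.net.HttpURLConnection;
    import java.net.SocketTimeoutException;
    import java.net.URL;

    public class ReadTimeoutDemo {
      public static void main(String[] args) throws Exception {
        // Illustrative endpoint; any server that stalls longer than the
        // read timeout will do.
        URL url = new URL("http://localhost:8080/getimage?txid=1");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setConnectTimeout(2000); // fail fast if the server is unreachable
        conn.setReadTimeout(2000);    // abort a read that stalls past 2 seconds
        try (InputStream in = conn.getInputStream()) {
          in.read(); // blocks until data arrives or the read timeout fires
        } catch (SocketTimeoutException e) {
          // The JDK reports a stalled read as "Read timed out", which is the
          // exact message the test above asserts on.
          System.out.println("timed out as expected: " + e.getMessage());
        }
      }
    }

The static TransferFsImage.timeout field set by the test appears to be the knob this commit exposes so tests can tighten that read timeout; the stalling TestGetImageServlet then guarantees the timeout path is exercised deterministically.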


