hbase-commits mailing list archives

From: la...@apache.org
Subject: svn commit: r1425526 [6/6] - in /hbase/branches/0.94-test: ./ bin/ conf/ security/src/main/java/org/apache/hadoop/hbase/ipc/ security/src/main/java/org/apache/hadoop/hbase/security/access/ security/src/test/java/org/apache/hadoop/hbase/security/access/...
Date: Sun, 23 Dec 2012 20:55:55 GMT
Modified: hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java?rev=1425526&r1=1425525&r2=1425526&view=diff
==============================================================================
--- hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java (original)
+++ hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java Sun Dec 23 20:55:53 2012
@@ -90,26 +90,20 @@ public class TestFSUtils {
       // given the default replication factor is 3, the same as the number of
       // datanodes; the locality index for each host should be 100%,
       // or getWeight for each host should be the same as getUniqueBlocksWeights
-      final long maxTime = System.currentTimeMillis() + 2000;
-      boolean ok;
-      do {
-        ok = true;
-        FileStatus status = fs.getFileStatus(testFile);
-        HDFSBlocksDistribution blocksDistribution =
-          FSUtils.computeHDFSBlocksDistribution(fs, status, 0, status.getLen());
-        long uniqueBlocksTotalWeight =
-          blocksDistribution.getUniqueBlocksTotalWeight();
-        for (String host : hosts) {
-          long weight = blocksDistribution.getWeight(host);
-          ok = (ok && uniqueBlocksTotalWeight == weight);
-        }
-      } while (!ok && System.currentTimeMillis() < maxTime);
-      assertTrue(ok);
-      } finally {
+      FileStatus status = fs.getFileStatus(testFile);
+      HDFSBlocksDistribution blocksDistribution =
+        FSUtils.computeHDFSBlocksDistribution(fs, status, 0, status.getLen());
+      long uniqueBlocksTotalWeight =
+        blocksDistribution.getUniqueBlocksTotalWeight();
+      for (String host : hosts) {
+        long weight = blocksDistribution.getWeight(host);
+        assertTrue(uniqueBlocksTotalWeight == weight);
+      }
+    } finally {
       htu.shutdownMiniDFSCluster();
     }
 
-
+    
     try {
       // set up a cluster with 4 nodes
       String hosts[] = new String[] { "host1", "host2", "host3", "host4" };
@@ -124,22 +118,16 @@ public class TestFSUtils {
       // given the default replication factor is 3, we will have total of 9
       // replica of blocks; thus the host with the highest weight should have
       // weight == 3 * DEFAULT_BLOCK_SIZE
-      final long maxTime = System.currentTimeMillis() + 2000;
-      long weight;
-      long uniqueBlocksTotalWeight;
-      do {
-        FileStatus status = fs.getFileStatus(testFile);
-        HDFSBlocksDistribution blocksDistribution =
-          FSUtils.computeHDFSBlocksDistribution(fs, status, 0, status.getLen());
-        uniqueBlocksTotalWeight = blocksDistribution.getUniqueBlocksTotalWeight();
-
-        String tophost = blocksDistribution.getTopHosts().get(0);
-        weight = blocksDistribution.getWeight(tophost);
-
-        // NameNode is informed asynchronously, so we may have a delay. See HBASE-6175
-      } while (uniqueBlocksTotalWeight != weight && System.currentTimeMillis() < maxTime);
+      FileStatus status = fs.getFileStatus(testFile);
+      HDFSBlocksDistribution blocksDistribution =
+        FSUtils.computeHDFSBlocksDistribution(fs, status, 0, status.getLen());
+      long uniqueBlocksTotalWeight =
+        blocksDistribution.getUniqueBlocksTotalWeight();
+      
+      String tophost = blocksDistribution.getTopHosts().get(0);
+      long weight = blocksDistribution.getWeight(tophost);
       assertTrue(uniqueBlocksTotalWeight == weight);
-
+      
     } finally {
       htu.shutdownMiniDFSCluster();
     }
@@ -158,16 +146,11 @@ public class TestFSUtils {
       
       // given the default replication factor is 3, we will have total of 3
       // replica of blocks; thus there is one host without weight
-      final long maxTime = System.currentTimeMillis() + 2000;
-      HDFSBlocksDistribution blocksDistribution;
-      do {
-        FileStatus status = fs.getFileStatus(testFile);
-        blocksDistribution = FSUtils.computeHDFSBlocksDistribution(fs, status, 0, status.getLen());
-        // NameNode is informed asynchronously, so we may have a delay. See HBASE-6175
-      }
-      while (blocksDistribution.getTopHosts().size() != 3 && System.currentTimeMillis() < maxTime);
+      FileStatus status = fs.getFileStatus(testFile);
+      HDFSBlocksDistribution blocksDistribution =
+        FSUtils.computeHDFSBlocksDistribution(fs, status, 0, status.getLen());
       assertEquals("Wrong number of hosts distributing blocks.", 3,
-        blocksDistribution.getTopHosts().size());
+          blocksDistribution.getTopHosts().size());
     } finally {
       htu.shutdownMiniDFSCluster();
     }
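
The comments in these hunks reference HBASE-6175: DataNodes report block locations to the NameNode asynchronously, so a freshly written file may briefly show an incomplete HDFSBlocksDistribution. The loops visible in the removed lines bound that wait with a deadline. A minimal, self-contained sketch of the same bounded-polling pattern (class and method names here are illustrative, not part of this commit):

import java.util.concurrent.Callable;

// Illustrative helper only: polls a condition until it holds or a deadline passes.
public class BoundedWait {
  public static boolean waitFor(long timeoutMs, long intervalMs, Callable<Boolean> condition)
      throws Exception {
    final long deadline = System.currentTimeMillis() + timeoutMs;
    boolean ok = condition.call();
    while (!ok && System.currentTimeMillis() < deadline) {
      Thread.sleep(intervalMs);
      ok = condition.call();
    }
    return ok;
  }

  public static void main(String[] args) throws Exception {
    // Example: wait up to 2 seconds, re-checking every 100 ms.
    boolean ok = waitFor(2000, 100, () -> true);
    System.out.println("condition met: " + ok);
  }
}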

Modified: hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java?rev=1425526&r1=1425525&r2=1425526&view=diff
==============================================================================
--- hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java (original)
+++ hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java Sun Dec 23 20:55:53 2012
@@ -1658,35 +1658,6 @@ public class TestHBaseFsck {
   }
 
   /**
-   * Test fixing lingering reference file.
-   */
-  @Test
-  public void testLingeringReferenceFile() throws Exception {
-    String table = "testLingeringReferenceFile";
-    try {
-      setupTable(table);
-      assertEquals(ROWKEYS.length, countRows());
-
-      // Mess it up by creating a fake reference file
-      FileSystem fs = FileSystem.get(conf);
-      Path tableDir= FSUtils.getTablePath(FSUtils.getRootDir(conf), table);
-      Path regionDir = FSUtils.getRegionDirs(fs, tableDir).get(0);
-      Path famDir = new Path(regionDir, FAM_STR);
-      Path fakeReferenceFile = new Path(famDir, "fbce357483ceea.12144538");
-      fs.create(fakeReferenceFile);
-
-      HBaseFsck hbck = doFsck(conf, false);
-      assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.LINGERING_REFERENCE_HFILE });
-      // fix reference file
-      doFsck(conf, true);
-      // check that reference file fixed
-      assertNoErrors(doFsck(conf, false));
-    } finally {
-      deleteTable(table);
-    }
-  }
-
-  /**
    * Test pluggable error reporter. It can be plugged in
    * from system property or configuration.
    */

Modified: hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/util/hbck/HbckTestingUtil.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/util/hbck/HbckTestingUtil.java?rev=1425526&r1=1425525&r2=1425526&view=diff
==============================================================================
--- hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/util/hbck/HbckTestingUtil.java (original)
+++ hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/util/hbck/HbckTestingUtil.java Sun Dec 23 20:55:53 2012
@@ -30,7 +30,6 @@ import org.apache.hadoop.hbase.util.HBas
 import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter.ERROR_CODE;
 
 public class HbckTestingUtil {
-  private static ExecutorService exec = new ScheduledThreadPoolExecutor(10);
   public static HBaseFsck doFsck(
       Configuration conf, boolean fix) throws Exception {
     return doFsck(conf, fix, null);
@@ -38,14 +37,14 @@ public class HbckTestingUtil {
 
   public static HBaseFsck doFsck(
       Configuration conf, boolean fix, String table) throws Exception {
-    return doFsck(conf, fix, fix, fix, fix,fix, fix, fix, fix, table);
+    return doFsck(conf, fix, fix, fix, fix,fix, fix, fix, table);
   }
 
   public static HBaseFsck doFsck(Configuration conf, boolean fixAssignments,
       boolean fixMeta, boolean fixHdfsHoles, boolean fixHdfsOverlaps,
       boolean fixHdfsOrphans, boolean fixTableOrphans, boolean fixVersionFile,
-      boolean fixReferenceFiles, String table) throws Exception {
-    HBaseFsck fsck = new HBaseFsck(conf, exec);
+      String table) throws Exception {
+    HBaseFsck fsck = new HBaseFsck(conf);
     fsck.connect();
     fsck.setDisplayFullReport(); // i.e. -details
     fsck.setTimeLag(0);
@@ -56,7 +55,6 @@ public class HbckTestingUtil {
     fsck.setFixHdfsOrphans(fixHdfsOrphans);
     fsck.setFixTableOrphans(fixTableOrphans);
     fsck.setFixVersionFile(fixVersionFile);
-    fsck.setFixReferenceFiles(fixReferenceFiles);
     if (table != null) {
       fsck.includeTable(table);
     }
@@ -73,6 +71,7 @@ public class HbckTestingUtil {
    */
   public static HBaseFsck doHFileQuarantine(Configuration conf, String table) throws Exception {
     String[] args = {"-sidelineCorruptHFiles", "-ignorePreCheckPermission", table};
+    ExecutorService exec = new ScheduledThreadPoolExecutor(10);
     HBaseFsck hbck = new HBaseFsck(conf, exec);
     hbck.exec(exec, args);
     return hbck;
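
Two related changes show up in this helper: doFsck now builds HBaseFsck from the Configuration alone (the fixReferenceFiles flag is gone), and the ExecutorService used for HFile quarantine is created per call instead of being held in a static field. A hedged usage sketch of the quarantine path, assuming only the HBaseFsck(conf, exec) constructor and exec(exec, args) call shown in the diff; shutting the pool down afterwards is an added cleanup step, not part of the original helper:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.util.HBaseFsck;

public class QuarantineSketch {
  // Illustrative only: mirrors the per-call executor pattern of doHFileQuarantine above.
  public static HBaseFsck quarantine(Configuration conf, String table) throws Exception {
    String[] args = { "-sidelineCorruptHFiles", "-ignorePreCheckPermission", table };
    ExecutorService exec = new ScheduledThreadPoolExecutor(10);
    try {
      HBaseFsck hbck = new HBaseFsck(conf, exec);
      hbck.exec(exec, args);
      return hbck;
    } finally {
      exec.shutdown(); // assumption: callers release the pool themselves
    }
  }
}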

Modified: hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKTable.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKTable.java?rev=1425526&r1=1425525&r2=1425526&view=diff
==============================================================================
--- hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKTable.java (original)
+++ hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKTable.java Sun Dec 23 20:55:53 2012
@@ -27,7 +27,6 @@ import java.io.IOException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.*;
 import org.apache.hadoop.hbase.zookeeper.ZKTable.TableState;
 import org.apache.zookeeper.KeeperException;
@@ -111,9 +110,14 @@ public class TestZKTable {
     assertFalse(zkt.isTablePresent(name));
   }
 
-  private void runTest9294CompatibilityTest(String tableName, Configuration conf)
-  throws Exception {
-    ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf,
+  /**
+   * Test that ZK table writes table state in formats expected by 0.92 and 0.94 clients
+   */
+  @Test
+  public void test9294Compatibility() throws Exception {
+    final String tableName = "test9294Compatibility";
+
+    ZooKeeperWatcher zkw = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
       tableName, abortable, true);
     ZKTable zkt = new ZKTable(zkw);
     zkt.setEnabledTable(tableName);
@@ -125,22 +129,6 @@ public class TestZKTable {
   }
 
   /**
-   * Test that ZK table writes table state in formats expected by 0.92 and 0.94 clients
-   */
-  @Test
-  public void test9294Compatibility() throws Exception {
-    // without useMulti
-    String tableName = "test9294Compatibility";
-    runTest9294CompatibilityTest(tableName, TEST_UTIL.getConfiguration());
-
-    // with useMulti
-    tableName = "test9294CompatibilityWithMulti";
-    Configuration conf = HBaseConfiguration.create(TEST_UTIL.getConfiguration());
-    conf.setBoolean(HConstants.ZOOKEEPER_USEMULTI, true);
-    runTest9294CompatibilityTest(tableName, conf);
-  }
-
-  /**
    * RecoverableZookeeper that throws a KeeperException after throwExceptionInNumOperations
    */
   class ThrowingRecoverableZookeeper extends RecoverableZooKeeper {
@@ -182,17 +170,14 @@ public class TestZKTable {
    * Because two ZooKeeper nodes are written for each table state transition
    * {@link ZooKeeperWatcher#masterTableZNode} and {@link ZooKeeperWatcher#masterTableZNode92}
    * it is possible that we fail in between the two operations and are left with
-   * inconsistent state (when hbase.zookeeper.useMulti is false).
-   * Check that we can get back to a consistent state by retrying the operation.
+   * inconsistent state.  Check that we can get back to a consistent state by
+   * retrying the operation.
    */
   @Test
   public void testDisableTableRetry() throws Exception {
     final String tableName = "testDisableTableRetry";
 
-    Configuration conf = TEST_UTIL.getConfiguration();
-    // test only relevant if useMulti is false
-    conf.setBoolean(HConstants.ZOOKEEPER_USEMULTI, false);
-    ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf,
+    ZooKeeperWatcher zkw = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
       tableName, abortable, true);
     ThrowingRecoverableZookeeper throwing = new ThrowingRecoverableZookeeper(zkw);
     ZooKeeperWatcher spyZookeeperWatcher = Mockito.spy(zkw);
@@ -226,10 +211,7 @@ public class TestZKTable {
   public void testEnableTableRetry() throws Exception {
     final String tableName = "testEnableTableRetry";
 
-    Configuration conf = TEST_UTIL.getConfiguration();
-    // test only relevant if useMulti is false
-    conf.setBoolean(HConstants.ZOOKEEPER_USEMULTI, false);
-    ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf,
+    ZooKeeperWatcher zkw = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
       tableName, abortable, true);
     ThrowingRecoverableZookeeper throwing = new ThrowingRecoverableZookeeper(zkw);
     ZooKeeperWatcher spyZookeeperWatcher = Mockito.spy(zkw);
@@ -263,10 +245,7 @@ public class TestZKTable {
   public void testDeleteTableRetry() throws Exception {
     final String tableName = "testEnableTableRetry";
 
-    Configuration conf = TEST_UTIL.getConfiguration();
-    // test only relevant if useMulti is false
-    conf.setBoolean(HConstants.ZOOKEEPER_USEMULTI, false);
-    ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf,
+    ZooKeeperWatcher zkw = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
       tableName, abortable, true);
     ThrowingRecoverableZookeeper throwing = new ThrowingRecoverableZookeeper(zkw);
     ZooKeeperWatcher spyZookeeperWatcher = Mockito.spy(zkw);

Modified: hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKTableReadOnly.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKTableReadOnly.java?rev=1425526&r1=1425525&r2=1425526&view=diff
==============================================================================
--- hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKTableReadOnly.java (original)
+++ hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKTableReadOnly.java Sun Dec 23 20:55:53 2012
@@ -68,53 +68,33 @@ public class TestZKTableReadOnly {
     return ZKTableReadOnly.isEnabledTable(zkw, tableName);
   }
 
-  private void runClientCompatiblityWith92ZNodeTest(String tableName, Configuration conf)
-  throws Exception {
-    ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf,
-      tableName, abortable, true);
-    assertTrue(enableAndCheckEnabled(zkw, tableName));
-  }
   /**
-   * Test that client ZK reader can handle the 0.92 table format znode.
+   * Test that client ZK reader can handle the 0.92 table znode format.
    */
   @Test
   public void testClientCompatibilityWith92ZNode() throws Exception {
-    // test without useMulti
-    String tableName = "testClientCompatibilityWith92ZNode";
+    final String tableName = "testClientCompatibilityWith92ZNode";
+
     // Set the client to read from the 0.92 table znode format
     Configuration conf = HBaseConfiguration.create(TEST_UTIL.getConfiguration());
     String znode92 = conf.get("zookeeper.znode.masterTableEnableDisable92", "table92");
     conf.set("zookeeper.znode.clientTableEnableDisable", znode92);
-    runClientCompatiblityWith92ZNodeTest(tableName, conf);
-
-    // test with useMulti
-    tableName = "testClientCompatibilityWith92ZNodeUseMulti";
-    conf.setBoolean(HConstants.ZOOKEEPER_USEMULTI, true);
-    runClientCompatiblityWith92ZNodeTest(tableName, conf);
-  }
 
-  private void runClientCompatibilityWith94ZNodeTest(String tableName, Configuration conf)
-  throws Exception {
-    ZooKeeperWatcher zkw = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
+    ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf,
       tableName, abortable, true);
     assertTrue(enableAndCheckEnabled(zkw, tableName));
   }
 
   /**
-   * Test that client ZK reader can handle the current (0.94) table format znode.
+   * Test that client ZK reader can handle the current (0.94) table format znode
    */
   @Test
   public void testClientCompatibilityWith94ZNode() throws Exception {
-    String tableName = "testClientCompatibilityWith94ZNode";
+    final String tableName = "testClientCompatibilityWith94ZNode";
 
-    // without useMulti
-    runClientCompatibilityWith94ZNodeTest(tableName, TEST_UTIL.getConfiguration());
-
-    // with useMulti
-    tableName = "testClientCompatiblityWith94ZNodeUseMulti";
-    Configuration conf = HBaseConfiguration.create(TEST_UTIL.getConfiguration());
-    conf.setBoolean(HConstants.ZOOKEEPER_USEMULTI, true);
-    runClientCompatibilityWith94ZNodeTest(tableName, conf);
+    ZooKeeperWatcher zkw = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
+      tableName, abortable, true);
+    assertTrue(enableAndCheckEnabled(zkw, tableName));
   }
 
   @org.junit.Rule


