hbase-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From t...@apache.org
Subject svn commit: r957112 - in /hbase/trunk: CHANGES.txt src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java
Date Wed, 23 Jun 2010 05:26:38 GMT
Author: todd
Date: Wed Jun 23 05:26:38 2010
New Revision: 957112

URL: http://svn.apache.org/viewvc?rev=957112&view=rev
Log:
HBASE-2767. Fix reflection in tests that was made incompatible by HDFS-1209

Modified:
    hbase/trunk/CHANGES.txt
    hbase/trunk/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
    hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java

Modified: hbase/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hbase/trunk/CHANGES.txt?rev=957112&r1=957111&r2=957112&view=diff
==============================================================================
--- hbase/trunk/CHANGES.txt (original)
+++ hbase/trunk/CHANGES.txt Wed Jun 23 05:26:38 2010
@@ -412,6 +412,7 @@ Release 0.21.0 - Unreleased
    HBASE-2763  Cross-port HADOOP-6833 IPC parameter leak bug
    HBASE-2758  META region stuck in RS2ZK_REGION_OPENED state
                (Karthik Ranganathan via jgray)
+   HBASE-2767  Fix reflection in tests that was made incompatible by HDFS-1209
 
   IMPROVEMENTS
    HBASE-1760  Cleanup TODOs in HTable

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java?rev=957112&r1=957111&r2=957112&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java Wed Jun 23
05:26:38 2010
@@ -73,7 +73,7 @@ import com.google.common.base.Preconditi
  * logging levels nor make changes to configuration parameters.
  */
 public class HBaseTestingUtility {
-  private final Log LOG = LogFactory.getLog(getClass());
+  private final static Log LOG = LogFactory.getLog(HBaseTestingUtility.class);
   private final Configuration conf;
   private MiniZooKeeperCluster zkCluster = null;
   private MiniDFSCluster dfsCluster = null;
@@ -888,7 +888,7 @@ public class HBaseTestingUtility {
   }
 
   /**
-   * Set maxRecoveryErrorCount in DFSClient.  Currently its hard-coded to 5 and
+   * Set maxRecoveryErrorCount in DFSClient.  In 0.20 pre-append it's hard-coded to 5 and
    * makes tests linger.  Here is the exception you'll see:
    * <pre>
    * 2010-06-15 11:52:28,511 WARN  [DataStreamer for file /hbase/.logs/hlog.1276627923013
block blk_928005470262850423_1021] hdfs.DFSClient$DFSOutputStream(2657): Error Recovery for
block blk_928005470262850423_1021 failed  because recovery from primary datanode 127.0.0.1:53683
failed 4 times.  Pipeline was 127.0.0.1:53687, 127.0.0.1:53683. Will retry...
@@ -901,20 +901,23 @@ public class HBaseTestingUtility {
    * @throws IllegalArgumentException 
    */
   public static void setMaxRecoveryErrorCount(final OutputStream stream,
-      final int max)
-  throws SecurityException, NoSuchFieldException, IllegalArgumentException, IllegalAccessException
{
-    Class<?> [] clazzes = DFSClient.class.getDeclaredClasses();
-    for (Class<?> clazz: clazzes) {
-      String className = clazz.getSimpleName();
-      if (className.equals("DFSOutputStream")) {
-        if (clazz.isInstance(stream)) {
-          Field maxRecoveryErrorCountField =
-            stream.getClass().getDeclaredField("maxRecoveryErrorCount");
-          maxRecoveryErrorCountField.setAccessible(true);
-          maxRecoveryErrorCountField.setInt(stream, max);
-          break;
+      final int max) {
+    try {
+      Class<?> [] clazzes = DFSClient.class.getDeclaredClasses();
+      for (Class<?> clazz: clazzes) {
+        String className = clazz.getSimpleName();
+        if (className.equals("DFSOutputStream")) {
+          if (clazz.isInstance(stream)) {
+            Field maxRecoveryErrorCountField =
+              stream.getClass().getDeclaredField("maxRecoveryErrorCount");
+            maxRecoveryErrorCountField.setAccessible(true);
+            maxRecoveryErrorCountField.setInt(stream, max);
+            break;
+          }
         }
       }
+    } catch (Exception e) {
+      LOG.info("Could not set max recovery field", e);
     }
   }
 }

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java?rev=957112&r1=957111&r2=957112&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java
(original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java
Wed Jun 23 05:26:38 2010
@@ -26,6 +26,8 @@ import java.io.IOException;
 import java.util.List;
 import java.util.concurrent.atomic.AtomicInteger;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -50,12 +52,12 @@ import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
-import org.mortbay.log.Log;
 
 /**
  * Test replay of edits out of a WAL split.
  */
 public class TestWALReplay {
+  public static final Log LOG = LogFactory.getLog(TestWALReplay.class);
   private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
   private final EnvironmentEdge ee = EnvironmentEdgeManager.getDelegate();
   private Path hbaseRootDir = null;
@@ -68,14 +70,14 @@ public class TestWALReplay {
   public static void setUpBeforeClass() throws Exception {
     Configuration conf = TEST_UTIL.getConfiguration();
     conf.setBoolean("dfs.support.append", true);
-    // The below config not supported until 
+    // The below config supported by 0.20-append and CDH3b2
     conf.setInt("dfs.client.block.recovery.retries", 2);
     conf.setInt("hbase.regionserver.flushlogentries", 1);
     TEST_UTIL.startMiniDFSCluster(3);
     TEST_UTIL.setNameNodeNameSystemLeasePeriod(100, 10000);
     Path hbaseRootDir =
       TEST_UTIL.getDFSCluster().getFileSystem().makeQualified(new Path("/hbase"));
-    Log.info("hbase.rootdir=" + hbaseRootDir);
+    LOG.info("hbase.rootdir=" + hbaseRootDir);
     conf.set(HConstants.HBASE_DIR, hbaseRootDir.toString());
   }
 
@@ -411,7 +413,7 @@ public class TestWALReplay {
     assertEquals(1, splits.size());
     // Make sure the file exists
     assertTrue(fs.exists(splits.get(0)));
-    Log.info("Split file=" + splits.get(0));
+    LOG.info("Split file=" + splits.get(0));
     return splits.get(0);
   }
 
@@ -424,13 +426,7 @@ public class TestWALReplay {
     HLog wal = new HLog(FileSystem.get(c), logDir, oldLogDir, c, null);
     // Set down maximum recovery so the dfsclient doesn't linger retrying something
     // long gone.
-    try {
-      HBaseTestingUtility.setMaxRecoveryErrorCount(wal.getOutputStream(), 1);
-    } catch (Exception e) {
-      // These exceptions should never happen... make RuntimeException of them
-      // if they do.
-      throw new RuntimeException(e);
-    }
+    HBaseTestingUtility.setMaxRecoveryErrorCount(wal.getOutputStream(), 1);
     return wal;
   }
 }
\ No newline at end of file



Mime
View raw message