hbase-commits mailing list archives

From nspiegelb...@apache.org
Subject svn commit: r1176171 [8/9] - in /hbase/branches/0.89: ./ bin/ src/ src/assembly/ src/docs/src/documentation/content/xdocs/ src/main/java/org/apache/hadoop/hbase/ src/main/java/org/apache/hadoop/hbase/client/ src/main/java/org/apache/hadoop/hbase/filter...
Date Tue, 27 Sep 2011 02:41:20 GMT
Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreReconstruction.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreReconstruction.java?rev=1176171&r1=1176170&r2=1176171&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreReconstruction.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreReconstruction.java Tue Sep 27 02:41:16 2011
@@ -91,21 +91,21 @@ public class TestStoreReconstruction {
    */
   @Test
   public void runReconstructionLog() throws Exception {
-
     byte[] family = Bytes.toBytes("column");
     HColumnDescriptor hcd = new HColumnDescriptor(family);
     HTableDescriptor htd = new HTableDescriptor(TABLE);
     htd.addFamily(hcd);
     HRegionInfo info = new HRegionInfo(htd, null, null, false);
     Path oldLogDir = new Path(this.dir, HConstants.HREGION_OLDLOGDIR_NAME);
-    HLog log = new HLog(cluster.getFileSystem(),
-        this.dir, oldLogDir, conf, null);
+    Path logDir = new Path(this.dir, HConstants.HREGION_LOGDIR_NAME);
+    HLog log = new HLog(cluster.getFileSystem(), logDir, oldLogDir, conf, null);
     HRegion region = new HRegion(dir, log,
         cluster.getFileSystem(),conf, info, null);
     List<KeyValue> result = new ArrayList<KeyValue>();
 
     // Empty set to get all columns
-    NavigableSet<byte[]> qualifiers = new ConcurrentSkipListSet<byte[]>(Bytes.BYTES_COMPARATOR);
+    NavigableSet<byte[]> qualifiers =
+      new ConcurrentSkipListSet<byte[]>(Bytes.BYTES_COMPARATOR);
 
     final byte[] tableName = Bytes.toBytes(TABLE);
     final byte[] rowName = tableName;
@@ -136,9 +136,8 @@ public class TestStoreReconstruction {
     // TODO dont close the file here.
     log.close();
 
-    List<Path> splits =
-        HLog.splitLog(new Path(conf.get(HConstants.HBASE_DIR)),
-            this.dir, oldLogDir, cluster.getFileSystem(), conf);
+    List<Path> splits = HLog.splitLog(new Path(conf.get(HConstants.HBASE_DIR)),
+      logDir, oldLogDir, cluster.getFileSystem(), conf);
 
     // Split should generate only 1 file since there's only 1 region
     assertEquals(1, splits.size());

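A note on the change above: the test previously handed HLog the test root itself as its live-log directory, so WAL files and region directories shared a parent; it now uses a dedicated directory, mirroring the real layout where HConstants.HREGION_LOGDIR_NAME sits beside HREGION_OLDLOGDIR_NAME. A minimal sketch of the construction pattern, using only the 0.89 signatures visible in this hunk (WalDirs is an illustrative wrapper; fs, rootDir and conf are assumed supplied by the caller):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.regionserver.wal.HLog;

    class WalDirs {
      // Sketch: open an HLog with distinct live and archive directories,
      // as the updated test does; the fifth argument (the roll listener)
      // is null here, as in the test.
      static HLog openLog(FileSystem fs, Path rootDir, Configuration conf)
          throws IOException {
        Path logDir = new Path(rootDir, HConstants.HREGION_LOGDIR_NAME);
        Path oldLogDir = new Path(rootDir, HConstants.HREGION_OLDLOGDIR_NAME);
        return new HLog(fs, logDir, oldLogDir, conf, null);
      }
    }
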
Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java?rev=1176171&r1=1176170&r2=1176171&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java Tue Sep 27 02:41:16 2011
@@ -23,6 +23,7 @@ package org.apache.hadoop.hbase.regionse
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Random;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -41,25 +42,39 @@ import org.apache.hadoop.hdfs.MiniDFSClu
 public class TestWideScanner extends HBaseTestCase {
   private final Log LOG = LogFactory.getLog(this.getClass());
 
-  static final int BATCH = 1000;
-
-  private MiniDFSCluster cluster = null;
-  private HRegion r;
-
+  static final byte[] A = Bytes.toBytes("A");
+  static final byte[] B = Bytes.toBytes("B");
+  static final byte[] C = Bytes.toBytes("C");
+  static byte[][] COLUMNS = { A, B, C };
+  static final Random rng = new Random();
   static final HTableDescriptor TESTTABLEDESC =
     new HTableDescriptor("testwidescan");
   static {
-    TESTTABLEDESC.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY,
+    TESTTABLEDESC.addFamily(new HColumnDescriptor(A,
+      10,  // Ten is an arbitrary number.  Keep versions to help debugging.
+      Compression.Algorithm.NONE.getName(), false, true, 8 * 1024,
+      HConstants.FOREVER, StoreFile.BloomType.NONE.toString(),
+      HColumnDescriptor.DEFAULT_REPLICATION_SCOPE));
+    TESTTABLEDESC.addFamily(new HColumnDescriptor(B,
+      10,  // Ten is an arbitrary number.  Keep versions to help debugging.
+      Compression.Algorithm.NONE.getName(), false, true, 8 * 1024,
+      HConstants.FOREVER, StoreFile.BloomType.NONE.toString(),
+      HColumnDescriptor.DEFAULT_REPLICATION_SCOPE));
+    TESTTABLEDESC.addFamily(new HColumnDescriptor(C,
       10,  // Ten is an arbitrary number.  Keep versions to help debugging.
       Compression.Algorithm.NONE.getName(), false, true, 8 * 1024,
       HConstants.FOREVER, StoreFile.BloomType.NONE.toString(),
       HColumnDescriptor.DEFAULT_REPLICATION_SCOPE));
   }
+
   /** HRegionInfo for root region */
   public static final HRegionInfo REGION_INFO =
     new HRegionInfo(TESTTABLEDESC, HConstants.EMPTY_BYTE_ARRAY,
     HConstants.EMPTY_BYTE_ARRAY);
 
+  MiniDFSCluster cluster = null;
+  HRegion r;
+
   @Override
   public void setUp() throws Exception {
     cluster = new MiniDFSCluster(conf, 2, true, (String[])null);
@@ -69,30 +84,15 @@ public class TestWideScanner extends HBa
     super.setUp();
   }
 
-  private int addWideContent(HRegion region, byte[] family)
-      throws IOException {
+  private int addWideContent(HRegion region) throws IOException {
     int count = 0;
-    // add a few rows of 2500 columns (we'll use batch of 1000) to make things
-    // interesting
     for (char c = 'a'; c <= 'c'; c++) {
       byte[] row = Bytes.toBytes("ab" + c);
       int i;
       for (i = 0; i < 2500; i++) {
         byte[] b = Bytes.toBytes(String.format("%10d", i));
         Put put = new Put(row);
-        put.add(family, b, b);
-        region.put(put);
-        count++;
-      }
-    }
-    // add one row of 100,000 columns
-    {
-      byte[] row = Bytes.toBytes("abf");
-      int i;
-      for (i = 0; i < 100000; i++) {
-        byte[] b = Bytes.toBytes(String.format("%10d", i));
-        Put put = new Put(row);
-        put.add(family, b, b);
+        put.add(COLUMNS[rng.nextInt(COLUMNS.length)], b, b);
         region.put(put);
         count++;
       }
@@ -103,11 +103,13 @@ public class TestWideScanner extends HBa
   public void testWideScanBatching() throws IOException {
     try {
       this.r = createNewHRegion(REGION_INFO.getTableDesc(), null, null);
-      int inserted = addWideContent(this.r, HConstants.CATALOG_FAMILY);
+      int inserted = addWideContent(this.r);
       List<KeyValue> results = new ArrayList<KeyValue>();
       Scan scan = new Scan();
-      scan.addFamily(HConstants.CATALOG_FAMILY);
-      scan.setBatch(BATCH);
+      scan.addFamily(A);
+      scan.addFamily(B);
+      scan.addFamily(C);
+      scan.setBatch(1000);
       InternalScanner s = r.getScanner(scan);
       int total = 0;
       int i = 0;
@@ -117,8 +119,8 @@ public class TestWideScanner extends HBa
         i++;
         LOG.info("iteration #" + i + ", results.size=" + results.size());
 
-        // assert that the result set is no larger than BATCH
-        assertTrue(results.size() <= BATCH);
+        // assert that the result set is no larger than 1000
+        assertTrue(results.size() <= 1000);
 
         total += results.size();
 

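The TestWideScanner rewrite spreads each wide row's 2,500 columns at random across three new families (A, B, C) instead of writing them all to the catalog family, and drops the 100,000-column row. What the test asserts is the batching contract: with Scan.setBatch(1000), no single scanner call returns more than 1,000 KeyValues, so a wide row arrives in chunks. A sketch of that loop, assuming the 0.89 InternalScanner API in which next(List<KeyValue>) returns false once the scan is exhausted (r, A, B and C as defined in the test):

    Scan scan = new Scan();
    scan.addFamily(A);
    scan.addFamily(B);
    scan.addFamily(C);
    scan.setBatch(1000);  // cap results handed back per next() call
    InternalScanner s = r.getScanner(scan);
    List<KeyValue> results = new ArrayList<KeyValue>();
    boolean more;
    do {
      more = s.next(results);
      // each call returns at most 1000 KeyValues, even mid-row
      results.clear();
    } while (more);
    s.close();
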
Copied: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/wal/InstrumentedSequenceFileLogWriter.java (from r1176170, hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java)
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/wal/InstrumentedSequenceFileLogWriter.java?p2=hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/wal/InstrumentedSequenceFileLogWriter.java&p1=hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java&r1=1176170&r2=1176171&rev=1176171&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/wal/InstrumentedSequenceFileLogWriter.java Tue Sep 27 02:41:16 2011
@@ -1,5 +1,5 @@
 /**
- * Copyright 2007 The Apache Software Foundation
+ * Copyright 2010 The Apache Software Foundation
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
@@ -17,21 +17,21 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hbase.regionserver;
+package org.apache.hadoop.hbase.regionserver.wal;
+
+import java.io.IOException;
 
-import org.apache.hadoop.hbase.HBaseTestCase;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.util.Bytes;
 
-public class TestHRegionInfo extends HBaseTestCase {
-  public void testCreateHRegionInfoName() throws Exception {
-    String tableName = "tablename";
-    final byte [] tn = Bytes.toBytes(tableName);
-    String startKey = "startkey";
-    final byte [] sk = Bytes.toBytes(startKey);
-    String id = "id";
-    byte [] name = HRegionInfo.createRegionName(tn, sk, id);
-    String nameStr = Bytes.toString(name);
-    assertEquals(nameStr, tableName + "," + startKey + "," + id);
-  }
+public class InstrumentedSequenceFileLogWriter extends SequenceFileLogWriter {
+
+  public static boolean activateFailure = false;
+  @Override
+  public void append(HLog.Entry entry) throws IOException {
+    super.append(entry);
+    if (activateFailure && Bytes.equals(entry.getKey().getRegionName(), "break".getBytes())) {
+      System.out.println(getClass().getName() + ": I will throw an exception now...");
+      throw new IOException("This exception is instrumented and should only be thrown for testing");
+    }
+  }
 }

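InstrumentedSequenceFileLogWriter (an svn copy of TestHRegionInfo.java, hence the "Copied" header above) is a fault-injection hook: once activateFailure is set, any append destined for a region literally named "break" throws. TestHLogSplit below installs it through the hbase.regionserver.hlog.writer.impl setting; condensed from that test, the arming pattern looks like this (conf, hbaseDir, hlogDir, oldLogDir and fs as in the test):

    conf.setClass("hbase.regionserver.hlog.writer.impl",
        InstrumentedSequenceFileLogWriter.class, HLog.Writer.class);
    InstrumentedSequenceFileLogWriter.activateFailure = true;   // arm the trap
    try {
      HLog.splitLog(hbaseDir, hlogDir, oldLogDir, fs, conf);    // expected to throw
    } finally {
      InstrumentedSequenceFileLogWriter.activateFailure = false;  // always disarm
    }
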
Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLog.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLog.java?rev=1176171&r1=1176170&r2=1176171&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLog.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLog.java Tue Sep 27 02:41:16 2011
@@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.regionser
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
 import org.apache.hadoop.hbase.regionserver.wal.HLog.Reader;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;
@@ -91,7 +92,7 @@ public class TestHLog extends HBaseTestC
     if (fs.exists(dir)) {
       fs.delete(dir, true);
     }
-    this.oldLogDir = new Path("/hbase", HConstants.HREGION_OLDLOGDIR_NAME);
+    this.oldLogDir = new Path(this.dir, HConstants.HREGION_OLDLOGDIR_NAME);
 
   }
 
@@ -113,7 +114,8 @@ public class TestHLog extends HBaseTestC
 
     final byte [] tableName = Bytes.toBytes(getName());
     final byte [] rowName = tableName;
-    HLog log = new HLog(this.fs, this.dir, this.oldLogDir, this.conf, null);
+    Path logdir = new Path(this.dir, HConstants.HREGION_LOGDIR_NAME);
+    HLog log = new HLog(this.fs, logdir, this.oldLogDir, this.conf, null);
     final int howmany = 3;
     HRegionInfo[] infos = new HRegionInfo[3];
     for(int i = 0; i < howmany; i++) {
@@ -132,7 +134,7 @@ public class TestHLog extends HBaseTestC
             byte [] column = Bytes.toBytes("column:" + Integer.toString(j));
             edit.add(new KeyValue(rowName, family, qualifier,
                 System.currentTimeMillis(), column));
-            System.out.println("Region " + i + ": " + edit);
+            LOG.info("Region " + i + ": " + edit);
             log.append(infos[i], tableName, edit,
               System.currentTimeMillis());
           }
@@ -142,8 +144,9 @@ public class TestHLog extends HBaseTestC
       }
       Configuration new_conf = new Configuration(this.conf);
       new_conf.setBoolean("dfs.support.append", false);
+      Path splitsdir = new Path(this.dir, "splits");
       List<Path> splits =
-        HLog.splitLog(this.testDir, this.dir, this.oldLogDir, this.fs, new_conf);
+        HLog.splitLog(splitsdir, logdir, this.oldLogDir, this.fs, new_conf);
       verifySplits(splits, howmany);
       log = null;
     } finally {
@@ -187,7 +190,7 @@ public class TestHLog extends HBaseTestC
     // gives you EOFE.
     wal.sync();
     // Open a Reader.
-    Path walPath = wal.computeFilename(wal.getFilenum());
+    Path walPath = wal.computeFilename();
     HLog.Reader reader = HLog.getReader(fs, walPath, conf);
     int count = 0;
     HLog.Entry entry = new HLog.Entry();
@@ -265,6 +268,7 @@ public class TestHLog extends HBaseTestC
   throws IOException {
     assertEquals(howmany, splits.size());
     for (int i = 0; i < splits.size(); i++) {
+      LOG.info("Verifying=" + splits.get(i));
       HLog.Reader reader = HLog.getReader(this.fs, splits.get(i), conf);
       try {
         int count = 0;
@@ -279,6 +283,7 @@ public class TestHLog extends HBaseTestC
           if (previousRegion != null) {
             assertEquals(previousRegion, region);
           }
+          LOG.info("oldseqno=" + seqno + ", newseqno=" + key.getLogSeqNum());
           assertTrue(seqno < key.getLogSeqNum());
           seqno = key.getLogSeqNum();
           previousRegion = region;
@@ -313,7 +318,7 @@ public class TestHLog extends HBaseTestC
     }
     // Now call sync to send the data to HDFS datanodes
     wal.sync(true);
-    final Path walPath = wal.computeFilename(wal.getFilenum());
+    final Path walPath = wal.computeFilename();
 
     // Stop the cluster.  (ensure restart since we're sharing MiniDFSCluster)
     try {
@@ -351,12 +356,13 @@ public class TestHLog extends HBaseTestC
 
     // Now try recovering the log, like the HMaster would do
     final FileSystem recoveredFs = this.fs;
+    final Configuration rlConf = this.conf;
 
     class RecoverLogThread extends Thread {
       public Exception exception = null;
       public void run() {
           try {
-            HLog.recoverLog(recoveredFs, walPath, true);
+            FSUtils.recoverFileLease(recoveredFs, walPath, rlConf);
           } catch (IOException e) {
             exception = e;
           }
@@ -417,7 +423,7 @@ public class TestHLog extends HBaseTestC
       long logSeqId = log.startCacheFlush();
       log.completeCacheFlush(regionName, tableName, logSeqId, info.isMetaRegion());
       log.close();
-      Path filename = log.computeFilename(log.getFilenum());
+      Path filename = log.computeFilename();
       log = null;
       // Now open a reader on the log and assert append worked.
       reader = HLog.getReader(fs, filename, conf);
@@ -485,7 +491,7 @@ public class TestHLog extends HBaseTestC
       long logSeqId = log.startCacheFlush();
       log.completeCacheFlush(hri.getRegionName(), tableName, logSeqId, false);
       log.close();
-      Path filename = log.computeFilename(log.getFilenum());
+      Path filename = log.computeFilename();
       log = null;
       // Now open a reader on the log and assert append worked.
       reader = HLog.getReader(fs, filename, conf);
@@ -524,4 +530,4 @@ public class TestHLog extends HBaseTestC
       }
     }
   }
-}
\ No newline at end of file
+}

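Two API migrations run through TestHLog: HLog.recoverLog(fs, path, true) becomes FSUtils.recoverFileLease(fs, path, conf), and computeFilename() now takes no file number. Lease recovery still runs on its own thread so the test can bound how long it may block; a condensed sketch of that pattern from the hunk above (recoveredFs, walPath and rlConf are the test's final locals):

    class RecoverLogThread extends Thread {
      volatile Exception exception = null;
      public void run() {
        try {
          // recover the HDFS write lease on the WAL, as the HMaster would
          FSUtils.recoverFileLease(recoveredFs, walPath, rlConf);
        } catch (IOException e) {
          exception = e;  // surfaced and rethrown by the main test thread
        }
      }
    }
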
Added: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogSplit.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogSplit.java?rev=1176171&view=auto
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogSplit.java (added)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogSplit.java Tue Sep 27 02:41:16 2011
@@ -0,0 +1,764 @@
+/**
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver.wal;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.lang.reflect.Field;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicLong;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Threads;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.ipc.RemoteException;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * Testing {@link HLog} splitting code.
+ */
+public class TestHLogSplit {
+
+  private Configuration conf;
+  private FileSystem fs;
+
+  private final static HBaseTestingUtility
+          TEST_UTIL = new HBaseTestingUtility();
+
+
+  private static final Path hbaseDir = new Path("/hbase");
+  private static final Path hlogDir = new Path(hbaseDir, "hlog");
+  private static final Path oldLogDir = new Path(hbaseDir, "hlog.old");
+  private static final Path corruptDir = new Path(hbaseDir, ".corrupt");
+
+  private static final int NUM_WRITERS = 10;
+  private static final int ENTRIES = 10; // entries per writer per region
+
+  private HLog.Writer[] writer = new HLog.Writer[NUM_WRITERS];
+  private long seq = 0;
+  private static final byte[] TABLE_NAME = "t1".getBytes();
+  private static final byte[] FAMILY = "f1".getBytes();
+  private static final byte[] QUALIFIER = "q1".getBytes();
+  private static final byte[] VALUE = "v1".getBytes();
+  private static final String HLOG_FILE_PREFIX = "hlog.dat.";
+  private static List<String> regions;
+  private static final String HBASE_SKIP_ERRORS = "hbase.hlog.split.skip.errors";
+
+
+  static enum Corruptions {
+    INSERT_GARBAGE_ON_FIRST_LINE,
+    INSERT_GARBAGE_IN_THE_MIDDLE,
+    APPEND_GARBAGE,
+  }
+
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    TEST_UTIL.getConfiguration().
+            setInt("hbase.regionserver.flushlogentries", 1);
+    TEST_UTIL.getConfiguration().
+            setBoolean("dfs.support.append", true);
+    TEST_UTIL.getConfiguration().
+            setStrings("hbase.rootdir", hbaseDir.toString());
+    TEST_UTIL.getConfiguration().
+            setClass("hbase.regionserver.hlog.writer.impl",
+                InstrumentedSequenceFileLogWriter.class, HLog.Writer.class);
+
+    TEST_UTIL.startMiniDFSCluster(2);
+  }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    TEST_UTIL.shutdownMiniDFSCluster();
+  }
+
+  @Before
+  public void setUp() throws Exception {
+    conf = TEST_UTIL.getConfiguration();
+    fs = TEST_UTIL.getDFSCluster().getFileSystem();
+    FileStatus[] entries = fs.listStatus(new Path("/"));
+    for (FileStatus dir : entries){
+      fs.delete(dir.getPath(), true);
+    }
+    seq = 0;
+    regions = new ArrayList<String>();
+    Collections.addAll(regions, "bbb", "ccc");
+    InstrumentedSequenceFileLogWriter.activateFailure = false;
+    // Set the soft lease for hdfs to be down from default of 5 minutes or so.
+    // TODO: If 0.20 hadoop do one thing, if 0.21 hadoop do another.
+    // Not available in 0.20 hdfs
+    // TEST_UTIL.getDFSCluster().getNamesystem().leaseManager.
+    //  setLeasePeriod(100, 50000);
+    // Use reflection to get at the 0.20 version of above.
+    MiniDFSCluster dfsCluster = TEST_UTIL.getDFSCluster();
+    //   private NameNode nameNode;
+    Field field = dfsCluster.getClass().getDeclaredField("nameNode");
+    field.setAccessible(true);
+    NameNode nn = (NameNode)field.get(dfsCluster);
+    nn.namesystem.leaseManager.setLeasePeriod(100, 50000);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+  }
+
+  @Test(expected = IOException.class)
+  public void testSplitFailsIfNewHLogGetsCreatedAfterSplitStarted()
+  throws IOException {
+    AtomicBoolean stop = new AtomicBoolean(false);
+    generateHLogs(-1);
+    fs.initialize(fs.getUri(), conf);
+    try {
+      (new ZombieNewLogWriterRegionServer(stop)).start();
+      HLog.splitLog(hbaseDir, hlogDir, oldLogDir, fs, conf);
+    } finally {
+      stop.set(true);
+    }
+  }
+
+
+  @Test
+  public void testSplitPreservesEdits() throws IOException{
+    final String REGION = "region__1";
+    regions.removeAll(regions);
+    regions.add(REGION);
+
+    generateHLogs(1, 10, -1);
+    fs.initialize(fs.getUri(), conf);
+    HLog.splitLog(hbaseDir, hlogDir, oldLogDir, fs, conf);
+
+    Path originalLog = (fs.listStatus(oldLogDir))[0].getPath();
+    Path splitLog = getLogForRegion(hbaseDir, TABLE_NAME, REGION);
+
+    assertEquals("edits differ after split", true, logsAreEqual(originalLog, splitLog));
+  }
+
+
+  @Test
+  public void testEmptyLogFiles() throws IOException {
+
+    injectEmptyFile(".empty", true);
+    generateHLogs(Integer.MAX_VALUE);
+    injectEmptyFile("empty", true);
+
+    // make fs act as a different client now
+    // initialize will create a new DFSClient with a new client ID
+    fs.initialize(fs.getUri(), conf);
+
+    HLog.splitLog(hbaseDir, hlogDir, oldLogDir, fs, conf);
+
+
+    for (String region : regions) {
+      Path logfile = getLogForRegion(hbaseDir, TABLE_NAME, region);
+      assertEquals(NUM_WRITERS * ENTRIES, countHLog(logfile, fs, conf));
+    }
+
+  }
+
+
+  @Test
+  public void testEmptyOpenLogFiles() throws IOException {
+    injectEmptyFile(".empty", false);
+    generateHLogs(Integer.MAX_VALUE);
+    injectEmptyFile("empty", false);
+
+    // make fs act as a different client now
+    // initialize will create a new DFSClient with a new client ID
+    fs.initialize(fs.getUri(), conf);
+
+    HLog.splitLog(hbaseDir, hlogDir, oldLogDir, fs, conf);
+
+    for (String region : regions) {
+      Path logfile = getLogForRegion(hbaseDir, TABLE_NAME, region);
+      assertEquals(NUM_WRITERS * ENTRIES, countHLog(logfile, fs, conf));
+    }
+  }
+
+  @Test
+  public void testOpenZeroLengthReportedFileButWithDataGetsSplit() throws IOException {
+    // generate logs but leave hlog.dat.5 open.
+    generateHLogs(5);
+
+    fs.initialize(fs.getUri(), conf);
+
+    HLog.splitLog(hbaseDir, hlogDir, oldLogDir, fs, conf);
+
+    for (String region : regions) {
+      Path logfile = getLogForRegion(hbaseDir, TABLE_NAME, region);
+      assertEquals(NUM_WRITERS * ENTRIES, countHLog(logfile, fs, conf));
+    }
+
+
+  }
+
+
+  @Test
+  public void testTralingGarbageCorruptionFileSkipErrorsPasses() throws IOException {
+    conf.setBoolean(HBASE_SKIP_ERRORS, true);
+    generateHLogs(Integer.MAX_VALUE);
+    corruptHLog(new Path(hlogDir, HLOG_FILE_PREFIX + "5"),
+            Corruptions.APPEND_GARBAGE, true, fs);
+    fs.initialize(fs.getUri(), conf);
+    HLog.splitLog(hbaseDir, hlogDir, oldLogDir, fs, conf);
+
+    for (String region : regions) {
+      Path logfile = getLogForRegion(hbaseDir, TABLE_NAME, region);
+      assertEquals(NUM_WRITERS * ENTRIES, countHLog(logfile, fs, conf));
+    }
+
+
+  }
+
+  @Test
+  public void testFirstLineCorruptionLogFileSkipErrorsPasses() throws IOException {
+    conf.setBoolean(HBASE_SKIP_ERRORS, true);
+    generateHLogs(Integer.MAX_VALUE);
+    corruptHLog(new Path(hlogDir, HLOG_FILE_PREFIX + "5"),
+            Corruptions.INSERT_GARBAGE_ON_FIRST_LINE, true, fs);
+    fs.initialize(fs.getUri(), conf);
+    HLog.splitLog(hbaseDir, hlogDir, oldLogDir, fs, conf);
+
+    for (String region : regions) {
+      Path logfile = getLogForRegion(hbaseDir, TABLE_NAME, region);
+      assertEquals((NUM_WRITERS - 1) * ENTRIES, countHLog(logfile, fs, conf));
+    }
+
+
+  }
+
+
+  @Test
+  public void testMiddleGarbageCorruptionSkipErrorsReadsHalfOfFile() throws IOException {
+    conf.setBoolean(HBASE_SKIP_ERRORS, true);
+    generateHLogs(Integer.MAX_VALUE);
+    corruptHLog(new Path(hlogDir, HLOG_FILE_PREFIX + "5"),
+            Corruptions.INSERT_GARBAGE_IN_THE_MIDDLE, false, fs);
+    fs.initialize(fs.getUri(), conf);
+    HLog.splitLog(hbaseDir, hlogDir, oldLogDir, fs, conf);
+
+    for (String region : regions) {
+      Path logfile = getLogForRegion(hbaseDir, TABLE_NAME, region);
+      // the entries in the original logs are alternating regions
+      // considering the sequence file header, the middle corruption should
+      // affect at least half of the entries
+      int goodEntries = (NUM_WRITERS - 1) * ENTRIES;
+      int firstHalfEntries = (int) Math.ceil(ENTRIES / 2.0) - 1;
+      assertTrue("The file up to the corrupted area hasn't been parsed",
+              goodEntries + firstHalfEntries <= countHLog(logfile, fs, conf));
+    }
+  }
+
+  @Test
+  public void testCorruptedFileGetsArchivedIfSkipErrors() throws IOException {
+    conf.setBoolean(HBASE_SKIP_ERRORS, true);
+
+    Path c1 = new Path(hlogDir, HLOG_FILE_PREFIX + "0");
+    Path c2 = new Path(hlogDir, HLOG_FILE_PREFIX + "5");
+    Path c3 = new Path(hlogDir, HLOG_FILE_PREFIX + (NUM_WRITERS - 1));
+    generateHLogs(-1);
+    corruptHLog(c1, Corruptions.INSERT_GARBAGE_IN_THE_MIDDLE, false, fs);
+    corruptHLog(c2, Corruptions.APPEND_GARBAGE, true, fs);
+    corruptHLog(c3, Corruptions.INSERT_GARBAGE_ON_FIRST_LINE, true, fs);
+
+    fs.initialize(fs.getUri(), conf);
+    HLog.splitLog(hbaseDir, hlogDir, oldLogDir, fs, conf);
+
+    FileStatus[] archivedLogs = fs.listStatus(corruptDir);
+
+    assertEquals("expected a different file", c1.getName(), archivedLogs[0].getPath().getName());
+    assertEquals("expected a different file", c2.getName(), archivedLogs[1].getPath().getName());
+    assertEquals("expected a different file", c3.getName(), archivedLogs[2].getPath().getName());
+    assertEquals(archivedLogs.length, 3);
+
+  }
+
+  @Test
+  public void testLogsGetArchivedAfterSplit() throws IOException {
+    conf.setBoolean(HBASE_SKIP_ERRORS, false);
+
+    generateHLogs(-1);
+
+    fs.initialize(fs.getUri(), conf);
+    HLog.splitLog(hbaseDir, hlogDir, oldLogDir, fs, conf);
+
+    FileStatus[] archivedLogs = fs.listStatus(oldLogDir);
+
+    assertEquals("wrong number of files in the archive log", NUM_WRITERS, archivedLogs.length);
+  }
+
+
+
+  @Test(expected = IOException.class)
+  public void testTrailingGarbageCorruptionLogFileSkipErrorsFalseThrows() throws IOException {
+    conf.setBoolean(HBASE_SKIP_ERRORS, false);
+    generateHLogs(Integer.MAX_VALUE);
+    corruptHLog(new Path(hlogDir, HLOG_FILE_PREFIX + "5"),
+            Corruptions.APPEND_GARBAGE, true, fs);
+
+    fs.initialize(fs.getUri(), conf);
+    HLog.splitLog(hbaseDir, hlogDir, oldLogDir, fs, conf);
+  }
+
+  @Test
+  public void testCorruptedLogFilesSkipErrorsFalseDoesNotTouchLogs() throws IOException {
+    conf.setBoolean(HBASE_SKIP_ERRORS, false);
+    generateHLogs(-1);
+    corruptHLog(new Path(hlogDir, HLOG_FILE_PREFIX + "5"),
+            Corruptions.APPEND_GARBAGE, true, fs);
+    fs.initialize(fs.getUri(), conf);
+    try {
+      HLog.splitLog(hbaseDir, hlogDir, oldLogDir, fs, conf);
+    } catch (IOException e) {/* expected */}
+
+    assertEquals("if skip.errors is false all files should remain in place",
+            NUM_WRITERS, fs.listStatus(hlogDir).length);
+  }
+
+
+  @Test
+  public void testSplit() throws IOException {
+    generateHLogs(-1);
+    fs.initialize(fs.getUri(), conf);
+    HLog.splitLog(hbaseDir, hlogDir, oldLogDir, fs, conf);
+
+
+
+    for (String region : regions) {
+      Path logfile = getLogForRegion(hbaseDir, TABLE_NAME, region);
+      assertEquals(NUM_WRITERS * ENTRIES, countHLog(logfile, fs, conf));
+
+    }
+  }
+
+  @Test
+  public void testLogDirectoryShouldBeDeletedAfterSuccessfulSplit()
+  throws IOException {
+    generateHLogs(-1);
+    fs.initialize(fs.getUri(), conf);
+    HLog.splitLog(hbaseDir, hlogDir, oldLogDir, fs, conf);
+    FileStatus [] statuses = null;
+    try {
+      statuses = fs.listStatus(hlogDir);
+      assertTrue(statuses == null || statuses.length == 0);
+    } catch (FileNotFoundException e) {
+      // hadoop 0.21 throws FNFE whereas hadoop 0.20 returns null
+    }
+  }
+/* DISABLED for now.  TODO: HBASE-2645
+  @Test
+  public void testLogCannotBeWrittenOnceParsed() throws IOException {
+    AtomicLong counter = new AtomicLong(0);
+    AtomicBoolean stop = new AtomicBoolean(false);
+    generateHLogs(9);
+    fs.initialize(fs.getUri(), conf);
+
+    Thread zombie = new ZombieLastLogWriterRegionServer(writer[9], counter, stop);
+
+
+
+    try {
+      zombie.start();
+
+      HLog.splitLog(hbaseDir, hlogDir, oldLogDir, fs, conf);
+
+      Path logfile = getLogForRegion(hbaseDir, TABLE_NAME, "juliet");
+
+      // It's possible that the writer got an error while appending and didn't count it
+      // however the entry will in fact be written to file and split with the rest
+      long numberOfEditsInRegion = countHLog(logfile, fs, conf);
+      assertTrue("The log file could have at most 1 extra log entry, but " +
+              "can't have less. Zombie could write " + counter.get() + " and logfile had only " + numberOfEditsInRegion + " " + logfile, counter.get() == numberOfEditsInRegion ||
+                      counter.get() + 1 == numberOfEditsInRegion);
+    } finally {
+      stop.set(true);
+    }
+  }
+*/
+
+  @Test
+  public void testSplitWillNotTouchLogsIfNewHLogGetsCreatedAfterSplitStarted()
+  throws IOException {
+    AtomicBoolean stop = new AtomicBoolean(false);
+    generateHLogs(-1);
+    fs.initialize(fs.getUri(), conf);
+    Thread zombie = new ZombieNewLogWriterRegionServer(stop);
+
+    try {
+      zombie.start();
+      try {
+        HLog.splitLog(hbaseDir, hlogDir, oldLogDir, fs, conf);
+      } catch (IOException ex) {/* expected */}
+      int logFilesNumber = fs.listStatus(hlogDir).length;
+
+      assertEquals("Log files should not be archived if there's an extra file after split",
+              NUM_WRITERS + 1, logFilesNumber);
+    } finally {
+      stop.set(true);
+    }
+
+  }
+
+
+
+  @Test(expected = IOException.class)
+  public void testSplitWillFailIfWritingToRegionFails() throws Exception {
+    //leave 5th log open so we could append the "trap"
+    generateHLogs(4);
+
+    fs.initialize(fs.getUri(), conf);
+
+    InstrumentedSequenceFileLogWriter.activateFailure = false;
+    appendEntry(writer[4], TABLE_NAME, Bytes.toBytes("break"), ("r" + 999).getBytes(), FAMILY, QUALIFIER, VALUE, 0);
+    writer[4].close();
+
+
+    try {
+      InstrumentedSequenceFileLogWriter.activateFailure = true;
+      HLog.splitLog(hbaseDir, hlogDir, oldLogDir, fs, conf);
+
+    } catch (IOException e) {
+      assertEquals("java.io.IOException: This exception is instrumented and should only be thrown for testing", e.getMessage());
+      throw e;
+    } finally {
+      InstrumentedSequenceFileLogWriter.activateFailure = false;
+    }
+  }
+
+
+//  @Test
+  public void testSplittingLargeNumberOfRegionsConsistency() throws IOException {
+
+    regions.removeAll(regions);
+    for (int i=0; i<100; i++) {
+      regions.add("region__"+i);
+    }
+
+    generateHLogs(1, 100, -1);
+    fs.initialize(fs.getUri(), conf);
+
+    HLog.splitLog(hbaseDir, hlogDir, oldLogDir, fs, conf);
+    fs.rename(oldLogDir, hlogDir);
+    Path firstSplitPath = new Path(hbaseDir, Bytes.toString(TABLE_NAME) + ".first");
+    Path splitPath = new Path(hbaseDir, Bytes.toString(TABLE_NAME));
+    fs.rename(splitPath,
+            firstSplitPath);
+
+
+    fs.initialize(fs.getUri(), conf);
+    HLog.splitLog(hbaseDir, hlogDir, oldLogDir, fs, conf);
+
+    assertEquals(0, compareHLogSplitDirs(firstSplitPath, splitPath));
+  }
+
+
+
+
+  /**
+   * This thread will keep writing to the file after the split process has started.
+   * It simulates a region server that was considered dead but woke up and wrote
+   * some more to the last log entry.
+   */
+  class ZombieLastLogWriterRegionServer extends Thread {
+    AtomicLong editsCount;
+    AtomicBoolean stop;
+    Path log;
+    HLog.Writer lastLogWriter;
+    public ZombieLastLogWriterRegionServer(HLog.Writer writer, AtomicLong counter, AtomicBoolean stop) {
+      this.stop = stop;
+      this.editsCount = counter;
+      this.lastLogWriter = writer;
+    }
+
+    @Override
+    public void run() {
+      if (stop.get()){
+        return;
+      }
+      flushToConsole("starting");
+      while (true) {
+        try {
+
+          appendEntry(lastLogWriter, TABLE_NAME, "juliet".getBytes(),
+                  ("r" + editsCount).getBytes(), FAMILY, QUALIFIER, VALUE, 0);
+          lastLogWriter.sync();
+          editsCount.incrementAndGet();
+          try {
+            Thread.sleep(1);
+          } catch (InterruptedException e) {
+            //
+          }
+
+
+        } catch (IOException ex) {
+          if (ex instanceof RemoteException) {
+            flushToConsole("Juliet: got RemoteException " +
+                    ex.getMessage() + " while writing " + (editsCount.get() + 1));
+            break;
+          } else {
+            assertTrue("Failed to write " + editsCount.get(), false);
+          }
+
+        }
+      }
+
+
+    }
+  }
+
+  /**
+   * This thread will keep adding new log files.
+   * It simulates a region server that was considered dead but woke up and wrote
+   * some more to a new hlog.
+   */
+  class ZombieNewLogWriterRegionServer extends Thread {
+    AtomicBoolean stop;
+    public ZombieNewLogWriterRegionServer(AtomicBoolean stop) {
+      super("ZombieNewLogWriterRegionServer");
+      this.stop = stop;
+    }
+
+    @Override
+    public void run() {
+      if (stop.get()) {
+        return;
+      }
+      boolean splitStarted = false;
+      Path p = new Path(hbaseDir, new String(TABLE_NAME));
+      while (!splitStarted) {
+        try {
+          FileStatus [] statuses = fs.listStatus(p);
+          // In 0.20, listStatus comes back with a null if the file doesn't exist.
+          // In 0.21, it throws FNFE.
+          if (statuses != null && statuses.length > 0) {
+            // Done.
+            break;
+          }
+        } catch (FileNotFoundException e) {
+          // Expected in hadoop 0.21
+        } catch (IOException e1) {
+          assertTrue("Failed to list status ", false);
+        }
+        flushToConsole("Juliet: split not started, sleeping a bit...");
+        Threads.sleep(100);
+      }
+
+      Path julietLog = new Path(hlogDir, HLOG_FILE_PREFIX + ".juliet");
+      try {
+        HLog.Writer writer = HLog.createWriter(fs,
+                julietLog, conf);
+        appendEntry(writer, "juliet".getBytes(), ("juliet").getBytes(),
+                ("r").getBytes(), FAMILY, QUALIFIER, VALUE, 0);
+        writer.close();
+        flushToConsole("Juliet file creator: created file " + julietLog);
+      } catch (IOException e1) {
+        assertTrue("Failed to create file " + julietLog, false);
+      }
+    }
+  }
+
+  private void flushToConsole(String s) {
+    System.out.println(s);
+    System.out.flush();
+  }
+
+
+  private void generateHLogs(int leaveOpen) throws IOException {
+    generateHLogs(NUM_WRITERS, ENTRIES, leaveOpen);
+  }
+
+  private void generateHLogs(int writers, int entries, int leaveOpen) throws IOException {
+    for (int i = 0; i < writers; i++) {
+      writer[i] = HLog.createWriter(fs, new Path(hlogDir, HLOG_FILE_PREFIX + i), conf);
+      for (int j = 0; j < entries; j++) {
+        int prefix = 0;
+        for (String region : regions) {
+          String row_key = region + prefix++ + i + j;
+          appendEntry(writer[i], TABLE_NAME, region.getBytes(),
+                  row_key.getBytes(), FAMILY, QUALIFIER, VALUE, seq);
+        }
+      }
+      if (i != leaveOpen) {
+        writer[i].close();
+        flushToConsole("Closing writer " + i);
+      }
+    }
+  }
+
+  private Path getLogForRegion(Path rootdir, byte[] table, String region) {
+    return new Path(HRegion.getRegionDir(HTableDescriptor
+            .getTableDir(rootdir, table),
+            HRegionInfo.encodeRegionName(region.getBytes())),
+            HConstants.HREGION_OLDLOGFILE_NAME);
+  }
+
+  private void corruptHLog(Path path, Corruptions corruption, boolean close,
+                           FileSystem fs) throws IOException {
+
+    FSDataOutputStream out;
+    int fileSize = (int) fs.listStatus(path)[0].getLen();
+
+    FSDataInputStream in = fs.open(path);
+    byte[] corrupted_bytes = new byte[fileSize];
+    in.readFully(0, corrupted_bytes, 0, fileSize);
+    in.close();
+
+    switch (corruption) {
+      case APPEND_GARBAGE:
+        out = fs.append(path);
+        out.write("-----".getBytes());
+        closeOrFlush(close, out);
+        break;
+
+      case INSERT_GARBAGE_ON_FIRST_LINE:
+        fs.delete(path, false);
+        out = fs.create(path);
+        out.write(0);
+        out.write(corrupted_bytes);
+        closeOrFlush(close, out);
+        break;
+
+      case INSERT_GARBAGE_IN_THE_MIDDLE:
+        fs.delete(path, false);
+        out = fs.create(path);
+        int middle = (int) Math.floor(corrupted_bytes.length / 2);
+        out.write(corrupted_bytes, 0, middle);
+        out.write(0);
+        out.write(corrupted_bytes, middle, corrupted_bytes.length - middle);
+        closeOrFlush(close, out);
+        break;
+    }
+
+
+  }
+
+  private void closeOrFlush(boolean close, FSDataOutputStream out)
+  throws IOException {
+    if (close) {
+      out.close();
+    } else {
+      out.sync();
+      // Not in 0.20: out.hflush();
+    }
+  }
+
+  @SuppressWarnings("unused")
+  private void dumpHLog(Path log, FileSystem fs, Configuration conf) throws IOException {
+    HLog.Entry entry;
+    HLog.Reader in = HLog.getReader(fs, log, conf);
+    while ((entry = in.next()) != null) {
+      System.out.println(entry);
+    }
+  }
+
+  private int countHLog(Path log, FileSystem fs, Configuration conf) throws IOException {
+    int count = 0;
+    HLog.Reader in = HLog.getReader(fs, log, conf);
+    while (in.next() != null) {
+      count++;
+    }
+    return count;
+  }
+
+
+  public long appendEntry(HLog.Writer writer, byte[] table, byte[] region,
+                          byte[] row, byte[] family, byte[] qualifier,
+                          byte[] value, long seq)
+          throws IOException {
+
+    long time = System.nanoTime();
+    WALEdit edit = new WALEdit();
+    seq++;
+    edit.add(new KeyValue(row, family, qualifier, time, KeyValue.Type.Put, value));
+    writer.append(new HLog.Entry(new HLogKey(region, table, seq, time), edit));
+    writer.sync();
+    return seq;
+
+  }
+
+
+  private void injectEmptyFile(String suffix, boolean closeFile)
+          throws IOException {
+    HLog.Writer writer = HLog.createWriter(
+            fs, new Path(hlogDir, HLOG_FILE_PREFIX + suffix), conf);
+    if (closeFile) writer.close();
+  }
+
+  @SuppressWarnings("unused")
+  private void listLogs(FileSystem fs, Path dir) throws IOException {
+    for (FileStatus file : fs.listStatus(dir)) {
+      System.out.println(file.getPath());
+    }
+
+  }
+
+  private int compareHLogSplitDirs(Path p1, Path p2) throws IOException {
+    FileStatus[] f1 = fs.listStatus(p1);
+    FileStatus[] f2 = fs.listStatus(p2);
+
+    for (int i=0; i<f1.length; i++) {
+      if (!logsAreEqual(new Path(f1[i].getPath(), HConstants.HREGION_OLDLOGFILE_NAME),
+              new Path(f2[i].getPath(), HConstants.HREGION_OLDLOGFILE_NAME))) {
+        return -1;
+      }
+    }
+    return 0;
+  }
+
+  private boolean logsAreEqual(Path p1, Path p2) throws IOException {
+    HLog.Reader in1, in2;
+    in1 = HLog.getReader(fs, p1, conf);
+    in2 = HLog.getReader(fs, p2, conf);
+    HLog.Entry entry1;
+    HLog.Entry entry2;
+    while ((entry1 = in1.next()) != null) {
+      entry2 = in2.next();
+      if ((entry1.getKey().compareTo(entry2.getKey()) != 0) ||
+              (!entry1.getEdit().toString().equals(entry2.getEdit().toString()))) {
+        return false;
+      }
+    }
+    return true;
+  }
+
+
+}

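For reference, the arithmetic behind the count assertions in TestHLogSplit: generateHLogs writes ENTRIES edits per writer per region, so a clean split of all NUM_WRITERS files yields NUM_WRITERS * ENTRIES edits per region, and skipping one unreadable file under hbase.hlog.split.skip.errors drops exactly ENTRIES of them:

    // With the defaults in this file (NUM_WRITERS = 10, ENTRIES = 10):
    int cleanSplit = NUM_WRITERS * ENTRIES;            // 100 edits per region
    int oneFileSkipped = (NUM_WRITERS - 1) * ENTRIES;  //  90 when a corrupt file is skipped
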
Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogActionsListener.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogActionsListener.java?rev=1176171&r1=1176170&r2=1176171&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogActionsListener.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogActionsListener.java Tue Sep 27 02:41:16 2011
@@ -84,7 +84,7 @@ public class TestLogActionsListener {
   public void testActionListener() throws Exception {
     DummyLogActionsListener list = new DummyLogActionsListener();
     DummyLogActionsListener laterList = new DummyLogActionsListener();
-    HLog hlog = new HLog(fs, logDir, oldLogDir, conf, null, list);
+    HLog hlog = new HLog(fs, logDir, oldLogDir, conf, null, list, null);
     HRegionInfo hri = new HRegionInfo(new HTableDescriptor(SOME_BYTES),
         SOME_BYTES, SOME_BYTES, false);
 

Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java?rev=1176171&r1=1176170&r2=1176171&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java Tue Sep 27 02:41:16 2011
@@ -31,13 +31,13 @@ import org.apache.hadoop.hbase.HBaseClus
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.regionserver.wal.HLog;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -220,7 +220,7 @@ public class TestLogRolling extends HBas
     
     assertTrue("Need HDFS-826 for this test", log.canGetCurReplicas());
     // don't run this test without append support (HDFS-200 & HDFS-142)
-    assertTrue("Need append support for this test", HLog.isAppend(conf));
+    assertTrue("Need append support for this test", FSUtils.isAppendSupported(conf));
 
     // add up the datanode count, to ensure proper replication when we kill 1
     dfsCluster.startDataNodes(conf, 1, true, null, null);

Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/rest/HBaseRESTClusterTestBase.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/rest/HBaseRESTClusterTestBase.java?rev=1176171&r1=1176170&r2=1176171&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/rest/HBaseRESTClusterTestBase.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/rest/HBaseRESTClusterTestBase.java Tue Sep 27 02:41:16 2011
@@ -16,9 +16,6 @@ public class HBaseRESTClusterTestBase ex
   static final Log LOG =
     LogFactory.getLog(HBaseRESTClusterTestBase.class);
 
-  // use a nonstandard port
-  static final int DEFAULT_TEST_PORT = 38080;
-
   protected int testServletPort;
   Server server;
 
@@ -49,8 +46,7 @@ public class HBaseRESTClusterTestBase ex
     LOG.info("configured " + ServletContainer.class.getName());
     
     // set up Jetty and run the embedded server
-    testServletPort = conf.getInt("hbase.rest.port", DEFAULT_TEST_PORT);
-    server = new Server(testServletPort);
+    server = new Server(0);
     server.setSendServerVersion(false);
     server.setSendDateHeader(false);
       // set up context
@@ -58,6 +54,8 @@ public class HBaseRESTClusterTestBase ex
     context.addServlet(sh, "/*");
       // start the server
     server.start();
+      // get the port
+    testServletPort = server.getConnectors()[0].getLocalPort();
 
     LOG.info("started " + server.getClass().getName() + " on port " + 
       testServletPort);

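The REST test base stops claiming the fixed port 38080, which could collide with another process on a busy build host, and instead binds Jetty to an ephemeral port and reads back the assignment. The pattern, using the Jetty 6 calls visible in the hunk:

    Server server = new Server(0);  // port 0: let the OS pick a free port
    server.start();
    // ask the connector which port was actually bound
    int port = server.getConnectors()[0].getLocalPort();
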
Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/rest/TestRowResource.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/rest/TestRowResource.java?rev=1176171&r1=1176170&r2=1176171&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/rest/TestRowResource.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/rest/TestRowResource.java Tue Sep 27 02:41:16 2011
@@ -34,7 +34,6 @@ import org.apache.commons.httpclient.Hea
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.rest.client.Client;
 import org.apache.hadoop.hbase.rest.client.Cluster;
@@ -45,48 +44,43 @@ import org.apache.hadoop.hbase.rest.mode
 import org.apache.hadoop.hbase.util.Bytes;
 
 public class TestRowResource extends HBaseRESTClusterTestBase {
+  static final String TABLE = "TestRowResource";
+  static final String CFA = "a";
+  static final String CFB = "b";
+  static final String COLUMN_1 = CFA + ":1";
+  static final String COLUMN_2 = CFB + ":2";
+  static final String ROW_1 = "testrow1";
+  static final String VALUE_1 = "testvalue1";
+  static final String ROW_2 = "testrow2";
+  static final String VALUE_2 = "testvalue2";
+  static final String ROW_3 = "testrow3";
+  static final String VALUE_3 = "testvalue3";
+  static final String ROW_4 = "testrow4";
+  static final String VALUE_4 = "testvalue4";
+
+  Client client;
+  JAXBContext context;
+  Marshaller marshaller;
+  Unmarshaller unmarshaller;
+  HBaseAdmin admin;
 
-  private static final String TABLE = "TestRowResource";
-  private static final String COLUMN_1 = "a:";
-  private static final String COLUMN_2 = "b:";
-  private static final String ROW_1 = "testrow1";
-  private static final String VALUE_1 = "testvalue1";
-  private static final String ROW_2 = "testrow2";
-  private static final String VALUE_2 = "testvalue2";
-  private static final String ROW_3 = "testrow3";
-  private static final String VALUE_3 = "testvalue3";
-  private static final String ROW_4 = "testrow4";
-  private static final String VALUE_4 = "testvalue4";
-
-  private Client client;
-  private JAXBContext context;
-  private Marshaller marshaller;
-  private Unmarshaller unmarshaller;
-  private HBaseAdmin admin;
-
-  public TestRowResource() throws JAXBException {
-    super();
+  @Override
+  protected void setUp() throws Exception {
+    super.setUp();
     context = JAXBContext.newInstance(
         CellModel.class,
         CellSetModel.class,
         RowModel.class);
     marshaller = context.createMarshaller();
     unmarshaller = context.createUnmarshaller();
-  }
-
-  @Override
-  protected void setUp() throws Exception {
-    super.setUp();
     client = new Client(new Cluster().add("localhost", testServletPort));
     admin = new HBaseAdmin(conf);
     if (admin.tableExists(TABLE)) {
       return;
     }
     HTableDescriptor htd = new HTableDescriptor(TABLE);
-    htd.addFamily(new HColumnDescriptor(KeyValue.parseColumn(
-        Bytes.toBytes(COLUMN_1))[0]));
-    htd.addFamily(new HColumnDescriptor(KeyValue.parseColumn(
-        Bytes.toBytes(COLUMN_2))[0]));
+    htd.addFamily(new HColumnDescriptor(CFA));
+    htd.addFamily(new HColumnDescriptor(CFB));
     admin.createTable(htd);
   }
 
@@ -96,7 +90,7 @@ public class TestRowResource extends HBa
     super.tearDown();
   }
 
-  private Response deleteRow(String table, String row) throws IOException {
+  Response deleteRow(String table, String row) throws IOException {
     StringBuilder path = new StringBuilder();
     path.append('/');
     path.append(table);
@@ -107,7 +101,7 @@ public class TestRowResource extends HBa
     return response;
   }
 
-  private Response deleteValue(String table, String row, String column)
+  Response deleteValue(String table, String row, String column)
       throws IOException {
     StringBuilder path = new StringBuilder();
     path.append('/');
@@ -121,7 +115,7 @@ public class TestRowResource extends HBa
     return response;
   }
 
-  private Response getValueXML(String table, String row, String column)
+  Response getValueXML(String table, String row, String column)
       throws IOException {
     StringBuilder path = new StringBuilder();
     path.append('/');
@@ -134,7 +128,7 @@ public class TestRowResource extends HBa
     return response;
   }
 
-  private Response getValuePB(String table, String row, String column) 
+  Response getValuePB(String table, String row, String column)
       throws IOException {
     StringBuilder path = new StringBuilder();
     path.append('/');
@@ -147,8 +141,8 @@ public class TestRowResource extends HBa
     return response;
   }
 
-  private Response putValueXML(String table, String row, String column,
-      String value) throws IOException, JAXBException {
+  Response putValueXML(String table, String row, String column, String value)
+      throws IOException, JAXBException {
     StringBuilder path = new StringBuilder();
     path.append('/');
     path.append(table);
@@ -157,7 +151,8 @@ public class TestRowResource extends HBa
     path.append('/');
     path.append(column);
     RowModel rowModel = new RowModel(row);
-    rowModel.addCell(new CellModel(Bytes.toBytes(column), Bytes.toBytes(value)));
+    rowModel.addCell(new CellModel(Bytes.toBytes(column),
+      Bytes.toBytes(value)));
     CellSetModel cellSetModel = new CellSetModel();
     cellSetModel.addRow(rowModel);
     StringWriter writer = new StringWriter();
@@ -168,8 +163,8 @@ public class TestRowResource extends HBa
     return response;
   }
 
-  private void checkValueXML(String table, String row, String column, 
-      String value) throws IOException, JAXBException {
+  void checkValueXML(String table, String row, String column, String value)
+      throws IOException, JAXBException {
     Response response = getValueXML(table, row, column);
     assertEquals(response.getCode(), 200);
     CellSetModel cellSet = (CellSetModel)
@@ -180,8 +175,8 @@ public class TestRowResource extends HBa
     assertEquals(Bytes.toString(cell.getValue()), value);
   }
 
-  private Response putValuePB(String table, String row, String column,
-      String value) throws IOException {
+  Response putValuePB(String table, String row, String column, String value)
+      throws IOException {
     StringBuilder path = new StringBuilder();
     path.append('/');
     path.append(table);
@@ -190,7 +185,8 @@ public class TestRowResource extends HBa
     path.append('/');
     path.append(column);
     RowModel rowModel = new RowModel(row);
-    rowModel.addCell(new CellModel(Bytes.toBytes(column), Bytes.toBytes(value)));
+    rowModel.addCell(new CellModel(Bytes.toBytes(column),
+      Bytes.toBytes(value)));
     CellSetModel cellSetModel = new CellSetModel();
     cellSetModel.addRow(rowModel);
     Response response = client.put(path.toString(), MIMETYPE_PROTOBUF,
@@ -199,8 +195,8 @@ public class TestRowResource extends HBa
     return response;
   }
 
-  private void checkValuePB(String table, String row, String column, 
-      String value) throws IOException {
+  void checkValuePB(String table, String row, String column, String value)
+      throws IOException {
     Response response = getValuePB(table, row, column);
     assertEquals(response.getCode(), 200);
     CellSetModel cellSet = new CellSetModel();
@@ -211,7 +207,7 @@ public class TestRowResource extends HBa
     assertEquals(Bytes.toString(cell.getValue()), value);
   }
 
-  public void testDelete() throws IOException, JAXBException {
+  void doTestDelete() throws IOException, JAXBException {
     Response response;
     
     response = putValueXML(TABLE, ROW_1, COLUMN_1, VALUE_1);
@@ -235,7 +231,7 @@ public class TestRowResource extends HBa
     assertEquals(response.getCode(), 404);
   }
 
-  public void testSingleCellGetPutXML() throws IOException, JAXBException {
+  void doTestSingleCellGetPutXML() throws IOException, JAXBException {
     Response response = getValueXML(TABLE, ROW_1, COLUMN_1);
     assertEquals(response.getCode(), 404);
 
@@ -250,7 +246,7 @@ public class TestRowResource extends HBa
     assertEquals(response.getCode(), 200);    
   }
 
-  public void testSingleCellGetPutPB() throws IOException, JAXBException {
+  void doTestSingleCellGetPutPB() throws IOException, JAXBException {
     Response response = getValuePB(TABLE, ROW_1, COLUMN_1);
     assertEquals(response.getCode(), 404);
 
@@ -269,7 +265,7 @@ public class TestRowResource extends HBa
     assertEquals(response.getCode(), 200);    
   }
 
-  public void testSingleCellGetPutBinary() throws IOException {
+  void doTestSingleCellGetPutBinary() throws IOException {
     final String path = "/" + TABLE + "/" + ROW_3 + "/" + COLUMN_1;
     final byte[] body = Bytes.toBytes(VALUE_3);
     Response response = client.put(path, MIMETYPE_BINARY, body);
@@ -292,7 +288,7 @@ public class TestRowResource extends HBa
     assertEquals(response.getCode(), 200);
   }
 
-  public void testSingleCellGetJSON() throws IOException, JAXBException {
+  void doTestSingleCellGetJSON() throws IOException, JAXBException {
     final String path = "/" + TABLE + "/" + ROW_4 + "/" + COLUMN_1;
     Response response = client.put(path, MIMETYPE_BINARY,
       Bytes.toBytes(VALUE_4));
@@ -304,7 +300,7 @@ public class TestRowResource extends HBa
     assertEquals(response.getCode(), 200);
   }
 
-  public void testURLEncodedKey() throws IOException, JAXBException {
+  void doTestURLEncodedKey() throws IOException, JAXBException {
     String encodedKey = URLEncoder.encode("http://www.google.com/", 
       HConstants.UTF8_ENCODING);
     Response response;
@@ -316,17 +312,21 @@ public class TestRowResource extends HBa
     checkValueXML(TABLE, encodedKey, COLUMN_2, VALUE_2);
   }
 
-  public void testMultiCellGetPutXML() throws IOException, JAXBException {
+  void doTestMultiCellGetPutXML() throws IOException, JAXBException {
     String path = "/" + TABLE + "/fakerow";  // deliberate nonexistent row
 
     CellSetModel cellSetModel = new CellSetModel();
     RowModel rowModel = new RowModel(ROW_1);
-    rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), Bytes.toBytes(VALUE_1)));
-    rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), Bytes.toBytes(VALUE_2)));
+    rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1),
+      Bytes.toBytes(VALUE_1)));
+    rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2),
+      Bytes.toBytes(VALUE_2)));
     cellSetModel.addRow(rowModel);
     rowModel = new RowModel(ROW_2);
-    rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), Bytes.toBytes(VALUE_3)));
-    rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), Bytes.toBytes(VALUE_4)));
+    rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1),
+      Bytes.toBytes(VALUE_3)));
+    rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2),
+      Bytes.toBytes(VALUE_4)));
     cellSetModel.addRow(rowModel);
     StringWriter writer = new StringWriter();
     marshaller.marshal(cellSetModel, writer);
@@ -350,17 +350,21 @@ public class TestRowResource extends HBa
     assertEquals(response.getCode(), 200);
   }
 
-  public void testMultiCellGetPutPB() throws IOException {
+  void doTestMultiCellGetPutPB() throws IOException {
     String path = "/" + TABLE + "/fakerow";  // deliberate nonexistent row
 
     CellSetModel cellSetModel = new CellSetModel();
     RowModel rowModel = new RowModel(ROW_1);
-    rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), Bytes.toBytes(VALUE_1)));
-    rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), Bytes.toBytes(VALUE_2)));
+    rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1),
+      Bytes.toBytes(VALUE_1)));
+    rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2),
+      Bytes.toBytes(VALUE_2)));
     cellSetModel.addRow(rowModel);
     rowModel = new RowModel(ROW_2);
-    rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), Bytes.toBytes(VALUE_3)));
-    rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), Bytes.toBytes(VALUE_4)));
+    rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1),
+      Bytes.toBytes(VALUE_3)));
+    rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2),
+      Bytes.toBytes(VALUE_4)));
     cellSetModel.addRow(rowModel);
     Response response = client.put(path, MIMETYPE_PROTOBUF,
       cellSetModel.createProtobufOutput());
@@ -381,4 +385,15 @@ public class TestRowResource extends HBa
     response = deleteRow(TABLE, ROW_2);
     assertEquals(response.getCode(), 200);
   }
+
+  public void testRowResource() throws Exception {
+    doTestDelete();
+    doTestSingleCellGetPutXML();
+    doTestSingleCellGetPutPB();
+    doTestSingleCellGetPutBinary();
+    doTestSingleCellGetJSON();
+    doTestURLEncodedKey();
+    doTestMultiCellGetPutXML();
+    doTestMultiCellGetPutPB();
+  }
 }
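
The hunk above folds the per-method REST tests into package-private doTest* helpers driven by one public test* entry point. Under JUnit 3, setUp() and tearDown() wrap every public test* method, so a single entry point means the expensive REST fixture is built once per class run rather than once per test. A minimal sketch of the pattern, assuming JUnit 3 semantics; the class and method names below are illustrative, not taken from the HBase source:

    import junit.framework.TestCase;

    public class ExpensiveFixtureTest extends TestCase {
      private boolean fixtureUp;

      @Override
      protected void setUp() throws Exception {
        super.setUp();
        fixtureUp = true;  // stands in for starting a mini cluster
      }

      // Package-private helpers: not discovered by the JUnit 3 runner.
      void doTestFirstCase() { assertTrue(fixtureUp); }
      void doTestSecondCase() { assertTrue(fixtureUp); }

      // Single public entry point: setUp runs once for both cases.
      public void testAll() throws Exception {
        doTestFirstCase();
        doTestSecondCase();
      }
    }

The trade-off is coarser reporting: a failure in any doTest* helper fails the one aggregate test, so the helpers' assertion messages carry the burden of pinpointing the broken case.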

Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/rest/TestScannerResource.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/rest/TestScannerResource.java?rev=1176171&r1=1176170&r2=1176171&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/rest/TestScannerResource.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/rest/TestScannerResource.java Tue Sep 27 02:41:16 2011
@@ -48,21 +48,22 @@ import org.apache.hadoop.hbase.rest.mode
 import org.apache.hadoop.hbase.util.Bytes;
 
 public class TestScannerResource extends HBaseRESTClusterTestBase {
+  static final String TABLE = "TestScannerResource";
+  static final String CFA = "a";
+  static final String CFB = "b";
+  static final String COLUMN_1 = CFA + ":1";
+  static final String COLUMN_2 = CFB + ":2";
+
+  static int expectedRows1;
+  static int expectedRows2;
+
+  Client client;
+  JAXBContext context;
+  Marshaller marshaller;
+  Unmarshaller unmarshaller;
+  HBaseAdmin admin;
 
-  private static final String TABLE = "TestScannerResource";
-  private static final String COLUMN_1 = "a:";
-  private static final String COLUMN_2 = "b:";
-
-  private static int expectedRows1;
-  private static int expectedRows2;
-
-  private Client client;
-  private JAXBContext context;
-  private Marshaller marshaller;
-  private Unmarshaller unmarshaller;
-  private HBaseAdmin admin;
-
-  private int insertData(String tableName, String column, double prob)
+  int insertData(String tableName, String column, double prob)
       throws IOException {
     Random rng = new Random();
     int count = 0;
@@ -77,11 +78,7 @@ public class TestScannerResource extends
             k[1] = b2;
             k[2] = b3;
             Put put = new Put(k);
-            if(famAndQf.length == 1) {
-              put.add(famAndQf[0], new byte[0], k);
-            } else {
-              put.add(famAndQf[0], famAndQf[1], k);
-            }
+            put.add(famAndQf[0], famAndQf[1], k);
             table.put(put);
             count++;
           }
@@ -92,8 +89,9 @@ public class TestScannerResource extends
     return count;
   }
 
-  public TestScannerResource() throws JAXBException {
-    super();
+  @Override
+  protected void setUp() throws Exception {
+    super.setUp();
     context = JAXBContext.newInstance(
         CellModel.class,
         CellSetModel.class,
@@ -101,21 +99,14 @@ public class TestScannerResource extends
         ScannerModel.class);
     marshaller = context.createMarshaller();
     unmarshaller = context.createUnmarshaller();
-  }
-
-  @Override
-  protected void setUp() throws Exception {
-    super.setUp();
     client = new Client(new Cluster().add("localhost", testServletPort));
     admin = new HBaseAdmin(conf);
     if (admin.tableExists(TABLE)) {
       return;
     }
     HTableDescriptor htd = new HTableDescriptor(TABLE);
-    htd.addFamily(new HColumnDescriptor(KeyValue.parseColumn(
-        Bytes.toBytes(COLUMN_1))[0]));
-    htd.addFamily(new HColumnDescriptor(KeyValue.parseColumn(
-        Bytes.toBytes(COLUMN_2))[0]));
+    htd.addFamily(new HColumnDescriptor(CFA));
+    htd.addFamily(new HColumnDescriptor(CFB));
     admin.createTable(htd);
     expectedRows1 = insertData(TABLE, COLUMN_1, 1.0);
     expectedRows2 = insertData(TABLE, COLUMN_2, 0.5);
@@ -127,7 +118,7 @@ public class TestScannerResource extends
     super.tearDown();
   }
 
-  private int countCellSet(CellSetModel model) {
+  int countCellSet(CellSetModel model) {
     int count = 0;
     Iterator<RowModel> rows = model.getRows().iterator();
     while (rows.hasNext()) {
@@ -141,7 +132,7 @@ public class TestScannerResource extends
     return count;
   }
 
-  public void testSimpleScannerXML() throws IOException, JAXBException {
+  void doTestSimpleScannerXML() throws IOException, JAXBException {
     final int BATCH_SIZE = 5;
     // new scanner
     ScannerModel model = new ScannerModel();
@@ -169,7 +160,7 @@ public class TestScannerResource extends
     assertEquals(response.getCode(), 200);
   }
 
-  public void testSimpleScannerPB() throws IOException {
+  void doTestSimpleScannerPB() throws IOException {
     final int BATCH_SIZE = 10;
     // new scanner
     ScannerModel model = new ScannerModel();
@@ -194,7 +185,7 @@ public class TestScannerResource extends
     assertEquals(response.getCode(), 200);
   }
 
-  public void testSimpleScannerBinary() throws IOException {
+  void doTestSimpleScannerBinary() throws IOException {
     // new scanner
     ScannerModel model = new ScannerModel();
     model.setBatch(1);
@@ -231,7 +222,7 @@ public class TestScannerResource extends
     assertEquals(response.getCode(), 200);
   }
 
-  private int fullTableScan(ScannerModel model) throws IOException {
+  int fullTableScan(ScannerModel model) throws IOException {
     model.setBatch(100);
     Response response = client.put("/" + TABLE + "/scanner",
         MIMETYPE_PROTOBUF, model.createProtobufOutput());
@@ -264,7 +255,7 @@ public class TestScannerResource extends
     return count;
   }
 
-  public void testFullTableScan() throws IOException {
+  void doTestFullTableScan() throws IOException {
     ScannerModel model = new ScannerModel();
     model.addColumn(Bytes.toBytes(COLUMN_1));
     assertEquals(fullTableScan(model), expectedRows1);
@@ -273,4 +264,11 @@ public class TestScannerResource extends
     model.addColumn(Bytes.toBytes(COLUMN_2));
     assertEquals(fullTableScan(model), expectedRows2);
   }
+
+  public void testScannerResource() throws Exception {
+    doTestSimpleScannerXML();
+    doTestSimpleScannerPB();
+    doTestSimpleScannerBinary();
+    doTestFullTableScan();
+  }
 }
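
Note the column constants moving from bare families ("a:", "b:") to fully qualified names ("a:1", "b:2"): once every column carries a qualifier, KeyValue.parseColumn always yields a family/qualifier pair and insertData can drop its empty-qualifier branch. A hedged sketch of that parse against the 0.89 client API; the row key and printed output are illustrative only:

    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ColumnNamingSketch {
      public static void main(String[] args) {
        byte[] row = Bytes.toBytes("row-0");
        // "a:1" splits into a family and a qualifier, so the
        // two-argument form of Put.add applies unconditionally.
        byte[][] famAndQf = KeyValue.parseColumn(Bytes.toBytes("a:1"));
        Put put = new Put(row);
        put.add(famAndQf[0], famAndQf[1], row);
        System.out.println(Bytes.toString(famAndQf[0]) + " / "
            + Bytes.toString(famAndQf[1]));  // prints: a / 1
      }
    }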

Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java?rev=1176171&r1=1176170&r2=1176171&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java Tue Sep 27 02:41:16 2011
@@ -28,7 +28,6 @@ import java.util.Iterator;
 import java.util.List;
 
 import javax.xml.bind.JAXBContext;
-import javax.xml.bind.JAXBException;
 import javax.xml.bind.Marshaller;
 import javax.xml.bind.Unmarshaller;
 
@@ -69,47 +68,46 @@ import org.apache.hadoop.hbase.util.Byte
 
 public class TestScannersWithFilters extends HBaseRESTClusterTestBase {
 
-  private static final Log LOG =
-    LogFactory.getLog(TestScannersWithFilters.class);
+  static final Log LOG = LogFactory.getLog(TestScannersWithFilters.class);
 
-  private Client client;
-  private JAXBContext context;
-  private Marshaller marshaller;
-  private Unmarshaller unmarshaller;
-
-  private static final byte [][] ROWS_ONE = {
+  static final byte [][] ROWS_ONE = {
     Bytes.toBytes("testRowOne-0"), Bytes.toBytes("testRowOne-1"),
     Bytes.toBytes("testRowOne-2"), Bytes.toBytes("testRowOne-3")
   };
 
-  private static final byte [][] ROWS_TWO = {
+  static final byte [][] ROWS_TWO = {
     Bytes.toBytes("testRowTwo-0"), Bytes.toBytes("testRowTwo-1"),
     Bytes.toBytes("testRowTwo-2"), Bytes.toBytes("testRowTwo-3")
   };
 
-  private static final byte [][] FAMILIES = {
+  static final byte [][] FAMILIES = {
     Bytes.toBytes("testFamilyOne"), Bytes.toBytes("testFamilyTwo")
   };
 
-  private static final byte [][] QUALIFIERS_ONE = {
+  static final byte [][] QUALIFIERS_ONE = {
     Bytes.toBytes("testQualifierOne-0"), Bytes.toBytes("testQualifierOne-1"),
     Bytes.toBytes("testQualifierOne-2"), Bytes.toBytes("testQualifierOne-3")
   };
 
-  private static final byte [][] QUALIFIERS_TWO = {
+  static final byte [][] QUALIFIERS_TWO = {
     Bytes.toBytes("testQualifierTwo-0"), Bytes.toBytes("testQualifierTwo-1"),
     Bytes.toBytes("testQualifierTwo-2"), Bytes.toBytes("testQualifierTwo-3")
   };
 
-  private static final byte [][] VALUES = {
+  static final byte [][] VALUES = {
     Bytes.toBytes("testValueOne"), Bytes.toBytes("testValueTwo")
   };
 
-  private long numRows = ROWS_ONE.length + ROWS_TWO.length;
-  private long colsPerRow = FAMILIES.length * QUALIFIERS_ONE.length;
+  Client client;
+  JAXBContext context;
+  Marshaller marshaller;
+  Unmarshaller unmarshaller;
+  long numRows = ROWS_ONE.length + ROWS_TWO.length;
+  long colsPerRow = FAMILIES.length * QUALIFIERS_ONE.length;
 
-  public TestScannersWithFilters() throws JAXBException {
-    super();
+  @Override
+  protected void setUp() throws Exception {
+    super.setUp();
     context = JAXBContext.newInstance(
         CellModel.class,
         CellSetModel.class,
@@ -117,11 +115,6 @@ public class TestScannersWithFilters ext
         ScannerModel.class);
     marshaller = context.createMarshaller();
     unmarshaller = context.createUnmarshaller();
-  }
-
-  @Override
-  protected void setUp() throws Exception {
-    super.setUp();
     client = new Client(new Cluster().add("localhost", testServletPort));
     HBaseAdmin admin = new HBaseAdmin(conf);
     if (!admin.tableExists(getName())) {
@@ -200,7 +193,7 @@ public class TestScannersWithFilters ext
     super.tearDown();
   }
 
-  private void verifyScan(Scan s, long expectedRows, long expectedKeys) 
+  void verifyScan(Scan s, long expectedRows, long expectedKeys)
       throws Exception {
     ScannerModel model = ScannerModel.fromScan(s);
     model.setBatch(Integer.MAX_VALUE); // fetch it all at once
@@ -234,7 +227,7 @@ public class TestScannersWithFilters ext
     assertEquals(response.getCode(), 200);
   }
 
-  private void verifyScanFull(Scan s, KeyValue [] kvs) throws Exception {
+  void verifyScanFull(Scan s, KeyValue [] kvs) throws Exception {
     ScannerModel model = ScannerModel.fromScan(s);
     model.setBatch(Integer.MAX_VALUE); // fetch it all at once
     StringWriter writer = new StringWriter();
@@ -286,7 +279,7 @@ public class TestScannersWithFilters ext
       kvs.length, idx);
   }
 
-  private void verifyScanNoEarlyOut(Scan s, long expectedRows, long expectedKeys) 
+  void verifyScanNoEarlyOut(Scan s, long expectedRows, long expectedKeys)
       throws Exception {
     ScannerModel model = ScannerModel.fromScan(s);
     model.setBatch(Integer.MAX_VALUE); // fetch it all at once
@@ -327,7 +320,7 @@ public class TestScannersWithFilters ext
       " rows", expectedRows, j);
   }
 
-  public void testNoFilter() throws Exception {
+  void doTestNoFilter() throws Exception {
     // No filter
     long expectedRows = this.numRows;
     long expectedKeys = this.colsPerRow;
@@ -342,7 +335,7 @@ public class TestScannersWithFilters ext
     verifyScan(s, expectedRows, expectedKeys/2);
   }
 
-  public void testPrefixFilter() throws Exception {
+  void doTestPrefixFilter() throws Exception {
     // Grab rows from group one (half of total)
     long expectedRows = this.numRows / 2;
     long expectedKeys = this.colsPerRow;
@@ -351,8 +344,7 @@ public class TestScannersWithFilters ext
     verifyScan(s, expectedRows, expectedKeys);
   }
 
-  public void testPageFilter() throws Exception {
-    
+  void doTestPageFilter() throws Exception {
     // KVs in first 6 rows
     KeyValue [] expectedKVs = {
       // testRowOne-0
@@ -436,8 +428,7 @@ public class TestScannersWithFilters ext
     verifyScanFull(s, Arrays.copyOf(expectedKVs, 6));    
   }
 
-  public void testInclusiveStopFilter() throws Exception {
-
+  void doTestInclusiveStopFilter() throws Exception {
     // Grab rows from group one
     
     // If we just use start/stop row, we get total/2 - 1 rows
@@ -470,8 +461,7 @@ public class TestScannersWithFilters ext
 
   }
   
-  public void testQualifierFilter() throws Exception {
-    
+  void doTestQualifierFilter() throws Exception {
     // Match two keys (one from each family) in half the rows
     long expectedRows = this.numRows / 2;
     long expectedKeys = 2;
@@ -536,7 +526,8 @@ public class TestScannersWithFilters ext
     
     // Match keys not equal to
     // Look across rows and fully validate the keys and ordering
-    // Expect varied numbers of keys, 4 per row in group one, 6 per row in group two
+    // Expect varied numbers of keys, 4 per row in group one, 6 per row in
+    // group two
     f = new QualifierFilter(CompareOp.NOT_EQUAL,
         new BinaryComparator(QUALIFIERS_ONE[2]));
     s = new Scan();
@@ -582,7 +573,6 @@ public class TestScannersWithFilters ext
     };
     verifyScanFull(s, kvs);
      
-    
     // Test across rows and groups with a regex
     // Filter out "test*-2"
     // Expect 4 keys per row across both groups
@@ -624,11 +614,9 @@ public class TestScannersWithFilters ext
         new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]),
     };
     verifyScanFull(s, kvs);
-     
   }
   
-  public void testRowFilter() throws Exception {
-
+  void doTestRowFilter() throws Exception {
     // Match a single row, all keys
     long expectedRows = 1;
     long expectedKeys = this.colsPerRow;
@@ -743,7 +731,6 @@ public class TestScannersWithFilters ext
         new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]),
     };
     verifyScanFull(s, kvs);
-     
     
     // Test across rows and groups with a regex
     // Filter out everything that doesn't match "*-2"
@@ -770,11 +757,9 @@ public class TestScannersWithFilters ext
         new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1])
     };
     verifyScanFull(s, kvs);
-     
   }
   
-  public void testValueFilter() throws Exception {
-    
+  void doTestValueFilter() throws Exception {
     // Match group one rows
     long expectedRows = this.numRows / 2;
     long expectedKeys = this.colsPerRow;
@@ -896,8 +881,7 @@ public class TestScannersWithFilters ext
     verifyScanFull(s, kvs);
   }
   
-  public void testSkipFilter() throws Exception {
-    
+  void doTestSkipFilter() throws Exception {
     // Test for qualifier regex: "testQualifierOne-2"
     // Should only get rows from second group, and all keys
     Filter f = new SkipFilter(new QualifierFilter(CompareOp.NOT_EQUAL,
@@ -931,15 +915,17 @@ public class TestScannersWithFilters ext
     verifyScanFull(s, kvs);
   }
     
-  public void testFilterList() throws Exception {
-    
+  void doTestFilterList() throws Exception {
     // Test getting a single row, single key using Row, Qualifier, and Value 
     // regular expression and substring filters
     // Use must pass all
     List<Filter> filters = new ArrayList<Filter>();
-    filters.add(new RowFilter(CompareOp.EQUAL, new RegexStringComparator(".+-2")));
-    filters.add(new QualifierFilter(CompareOp.EQUAL, new RegexStringComparator(".+-2")));
-    filters.add(new ValueFilter(CompareOp.EQUAL, new SubstringComparator("One")));
+    filters.add(new RowFilter(CompareOp.EQUAL,
+      new RegexStringComparator(".+-2")));
+    filters.add(new QualifierFilter(CompareOp.EQUAL,
+      new RegexStringComparator(".+-2")));
+    filters.add(new ValueFilter(CompareOp.EQUAL,
+      new SubstringComparator("One")));
     Filter f = new FilterList(Operator.MUST_PASS_ALL, filters);
     Scan s = new Scan();
     s.addFamily(FAMILIES[0]);
@@ -949,19 +935,22 @@ public class TestScannersWithFilters ext
     };
     verifyScanFull(s, kvs);
 
-    // Test getting everything with a MUST_PASS_ONE filter including row, qf, val
-    // regular expression and substring filters
+    // Test getting everything with a MUST_PASS_ONE filter including row, qf,
+    // val, regular expression and substring filters
     filters.clear();
-    filters.add(new RowFilter(CompareOp.EQUAL, new RegexStringComparator(".+Two.+")));
-    filters.add(new QualifierFilter(CompareOp.EQUAL, new RegexStringComparator(".+-2")));
-    filters.add(new ValueFilter(CompareOp.EQUAL, new SubstringComparator("One")));
+    filters.add(new RowFilter(CompareOp.EQUAL,
+      new RegexStringComparator(".+Two.+")));
+    filters.add(new QualifierFilter(CompareOp.EQUAL,
+      new RegexStringComparator(".+-2")));
+    filters.add(new ValueFilter(CompareOp.EQUAL,
+      new SubstringComparator("One")));
     f = new FilterList(Operator.MUST_PASS_ONE, filters);
     s = new Scan();
     s.setFilter(f);
     verifyScanNoEarlyOut(s, this.numRows, this.colsPerRow);    
   }
   
-  public void testFirstKeyOnlyFilter() throws Exception {
+  void doTestFirstKeyOnlyFilter() throws Exception {
     Scan s = new Scan();
     s.setFilter(new FirstKeyOnlyFilter());
     // Expected KVs, the first KV from each of the remaining 6 rows
@@ -975,4 +964,17 @@ public class TestScannersWithFilters ext
     };
     verifyScanFull(s, kvs);
   }
+
+  public void testScannersWithFilters() throws Exception {
+    doTestNoFilter();
+    doTestPrefixFilter();
+    doTestPageFilter();
+    doTestInclusiveStopFilter();
+    doTestQualifierFilter();
+    doTestRowFilter();
+    doTestValueFilter();
+    doTestSkipFilter();
+    doTestFilterList();
+    doTestFirstKeyOnlyFilter();
+  }
 }
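
The other recurring change in these files is moving JAXBContext construction out of a constructor declared to throw JAXBException and into setUp(), which in turn lets the now-unused JAXBException import go. JUnit 3 instantiates test cases reflectively, so a checked exception thrown during construction aborts instantiation, whereas the same exception thrown from setUp() is reported as an ordinary test error. A minimal sketch, assuming JUnit 3 and the standard javax.xml.bind API; the nested Model class is a placeholder, not an HBase type:

    import javax.xml.bind.JAXBContext;
    import javax.xml.bind.annotation.XmlRootElement;
    import junit.framework.TestCase;

    public class MarshallingSetUpTest extends TestCase {
      @XmlRootElement
      static class Model { public String value; }

      JAXBContext context;

      @Override
      protected void setUp() throws Exception {
        super.setUp();
        // A JAXBException raised here surfaces as a test error with a
        // stack trace; from a constructor it would prevent the runner
        // from creating the test instance at all.
        context = JAXBContext.newInstance(Model.class);
      }

      public void testContextAvailable() {
        assertNotNull(context);
      }
    }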

Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/rest/TestSchemaResource.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/rest/TestSchemaResource.java?rev=1176171&r1=1176170&r2=1176171&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/rest/TestSchemaResource.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/rest/TestSchemaResource.java Tue Sep 27 02:41:16 2011
@@ -37,23 +37,19 @@ import org.apache.hadoop.hbase.rest.mode
 import org.apache.hadoop.hbase.util.Bytes;
 
 public class TestSchemaResource extends HBaseRESTClusterTestBase {
-  private Client client;
-  private JAXBContext context;
-  private HBaseAdmin admin;
+  static String TABLE1 = "TestSchemaResource1";
+  static String TABLE2 = "TestSchemaResource2";
 
-  private static String TABLE1 = "TestSchemaResource1";
-  private static String TABLE2 = "TestSchemaResource2";
-
-  public TestSchemaResource() throws JAXBException {
-    super();
-    context = JAXBContext.newInstance(
-        ColumnSchemaModel.class,
-        TableSchemaModel.class);
-  }
+  Client client;
+  JAXBContext context;
+  HBaseAdmin admin;
 
   @Override
   protected void setUp() throws Exception {
     super.setUp();
+    context = JAXBContext.newInstance(
+        ColumnSchemaModel.class,
+        TableSchemaModel.class);
     admin = new HBaseAdmin(conf);
     client = new Client(new Cluster().add("localhost", testServletPort));
   }
@@ -64,19 +60,18 @@ public class TestSchemaResource extends 
     super.tearDown();
   }
 
-  private byte[] toXML(TableSchemaModel model) throws JAXBException {
+  byte[] toXML(TableSchemaModel model) throws JAXBException {
     StringWriter writer = new StringWriter();
     context.createMarshaller().marshal(model, writer);
     return Bytes.toBytes(writer.toString());
   }
 
-  private TableSchemaModel fromXML(byte[] content) throws JAXBException {
+  TableSchemaModel fromXML(byte[] content) throws JAXBException {
     return (TableSchemaModel) context.createUnmarshaller()
       .unmarshal(new ByteArrayInputStream(content));
   }
 
-  public void testTableCreateAndDeleteXML() 
-      throws IOException, JAXBException {
+  void doTestTableCreateAndDeleteXML() throws IOException, JAXBException {
     String schemaPath = "/" + TABLE1 + "/schema";
     TableSchemaModel model;
     Response response;
@@ -105,7 +100,7 @@ public class TestSchemaResource extends 
     assertFalse(admin.tableExists(TABLE1));
   }
 
-  public void testTableCreateAndDeletePB() throws IOException, JAXBException {
+  void doTestTableCreateAndDeletePB() throws IOException, JAXBException {
     String schemaPath = "/" + TABLE2 + "/schema";
     TableSchemaModel model;
     Response response;
@@ -135,4 +130,9 @@ public class TestSchemaResource extends 
     // make sure HBase concurs
     assertFalse(admin.tableExists(TABLE2));
   }
+
+  public void testSchemaResource() throws Exception {
+    doTestTableCreateAndDeleteXML();
+    doTestTableCreateAndDeletePB();
+  }
 }

Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java?rev=1176171&r1=1176170&r2=1176171&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java Tue Sep 27 02:41:16 2011
@@ -33,21 +33,17 @@ import org.apache.hadoop.hbase.rest.mode
 import org.apache.hadoop.hbase.util.Bytes;
 
 public class TestStatusResource extends HBaseRESTClusterTestBase {
-  private static final byte[] ROOT_REGION_NAME = Bytes.toBytes("-ROOT-,,0");
-  private static final byte[] META_REGION_NAME = Bytes.toBytes(".META.,,1");
+  static final byte[] ROOT_REGION_NAME = Bytes.toBytes("-ROOT-,,0");
+  static final byte[] META_REGION_NAME = Bytes.toBytes(".META.,,1");
 
-  private Client client;
-  private JAXBContext context;
+  Client client;
+  JAXBContext context;
   
-  public TestStatusResource() throws JAXBException {
-    super();
-    context = JAXBContext.newInstance(
-        StorageClusterStatusModel.class);
-  }
-
   @Override
   protected void setUp() throws Exception {
     super.setUp();
+    context = JAXBContext.newInstance(
+      StorageClusterStatusModel.class);
     client = new Client(new Cluster().add("localhost", testServletPort));
   }
 
@@ -57,7 +53,7 @@ public class TestStatusResource extends 
     super.tearDown();
   }
 
-  private void validate(StorageClusterStatusModel model) {
+  void validate(StorageClusterStatusModel model) {
     assertNotNull(model);
     assertTrue(model.getRegions() >= 2);
     assertTrue(model.getRequests() >= 0);
@@ -84,7 +80,7 @@ public class TestStatusResource extends 
     assertTrue(foundMeta);
   }
 
-  public void testGetClusterStatusXML() throws IOException, JAXBException {
+  void doTestGetClusterStatusXML() throws IOException, JAXBException {
     Response response = client.get("/status/cluster", MIMETYPE_XML);
     assertEquals(response.getCode(), 200);
     StorageClusterStatusModel model = (StorageClusterStatusModel)
@@ -93,11 +89,16 @@ public class TestStatusResource extends 
     validate(model);
   }
   
-  public void testGetClusterStatusPB() throws IOException {
+  void doTestGetClusterStatusPB() throws IOException {
     Response response = client.get("/status/cluster", MIMETYPE_PROTOBUF);
     assertEquals(response.getCode(), 200);
     StorageClusterStatusModel model = new StorageClusterStatusModel();
     model.getObjectFromMessage(response.getBody());
     validate(model);
   }
+
+  public void testStatusResource() throws Exception {
+    doTestGetClusterStatusXML();
+    doTestGetClusterStatusPB();
+  }
 }


