hbase-commits mailing list archives

From: nspiegelb...@apache.org
Subject: svn commit: r1176177 [13/13] - in /hbase/branches/0.89: ./ bin/ bin/replication/ docs/ src/ src/assembly/ src/docs/ src/main/java/org/apache/hadoop/hbase/ src/main/java/org/apache/hadoop/hbase/avro/ src/main/java/org/apache/hadoop/hbase/avro/generated/...
Date: Tue, 27 Sep 2011 02:42:01 GMT
Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLog.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLog.java?rev=1176177&r1=1176176&r2=1176177&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLog.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLog.java Tue Sep 27 02:41:56 2011
@@ -38,9 +38,6 @@ import org.apache.hadoop.hbase.HConstant
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.regionserver.wal.HLog;
-import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
-import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
 import org.apache.hadoop.hbase.regionserver.wal.HLog.Reader;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -54,8 +51,8 @@ import org.apache.hadoop.io.SequenceFile
 import org.apache.log4j.Level;
 
 /** JUnit test case for HLog */
-public class TestHLog extends HBaseTestCase implements HConstants {
-  static final Log LOG = LogFactory.getLog(TestHLog.class);
+public class TestHLog extends HBaseTestCase {
+  private static final Log LOG = LogFactory.getLog(TestHLog.class);
   {
     ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
@@ -139,14 +136,12 @@ public class TestHLog extends HBaseTestC
               System.currentTimeMillis());
           }
         }
-        log.hflush();
         log.rollWriter();
       }
-      Configuration new_conf = new Configuration(this.conf);
-      new_conf.setBoolean("dfs.support.append", false);
+      log.close();
       Path splitsdir = new Path(this.dir, "splits");
       List<Path> splits =
-        HLog.splitLog(splitsdir, logdir, this.oldLogDir, this.fs, new_conf);
+        HLog.splitLog(splitsdir, logdir, this.oldLogDir, this.fs, conf);
       verifySplits(splits, howmany);
       log = null;
     } finally {
@@ -530,4 +525,47 @@ public class TestHLog extends HBaseTestC
       }
     }
   }
+
+  /**
+   * Test that registered visitors see entries before they are appended and stop being called once removed
+   * @throws Exception
+   */
+  public void testVisitors() throws Exception {
+    final int COL_COUNT = 10;
+    final byte [] tableName = Bytes.toBytes("tablename");
+    final byte [] row = Bytes.toBytes("row");
+    this.conf.setBoolean("dfs.support.append", true);
+    HLog log = new HLog(this.fs, dir, this.oldLogDir, this.conf, null);
+    DumbLogEntriesVisitor visitor = new DumbLogEntriesVisitor();
+    log.addLogEntryVisitor(visitor);
+    long timestamp = System.currentTimeMillis();
+    HRegionInfo hri = new HRegionInfo(new HTableDescriptor(tableName),
+        HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
+    for (int i = 0; i < COL_COUNT; i++) {
+      WALEdit cols = new WALEdit();
+      cols.add(new KeyValue(row, Bytes.toBytes("column"),
+          Bytes.toBytes(Integer.toString(i)),
+          timestamp, new byte[]{(byte) (i + '0')}));
+      log.append(hri, tableName, cols, System.currentTimeMillis());
+    }
+    assertEquals(COL_COUNT, visitor.increments);
+    log.removeLogEntryVisitor(visitor);
+    WALEdit cols = new WALEdit();
+    cols.add(new KeyValue(row, Bytes.toBytes("column"),
+        Bytes.toBytes(Integer.toString(11)),
+        timestamp, new byte[]{(byte) (11 + '0')}));
+    log.append(hri, tableName, cols, System.currentTimeMillis());
+    assertEquals(COL_COUNT, visitor.increments);
+  }
+
+  static class DumbLogEntriesVisitor implements LogEntryVisitor {
+
+    int increments = 0;
+
+    @Override
+    public void visitLogEntryBeforeWrite(HRegionInfo info, HLogKey logKey,
+                                         WALEdit logEdit) {
+      increments++;
+    }
+  }
 }
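
For context, a minimal sketch of another LogEntryVisitor beyond the DumbLogEntriesVisitor above: one that tallies appended KeyValues per table. It uses only the visitLogEntryBeforeWrite() hook and the addLogEntryVisitor()/removeLogEntryVisitor() calls exercised by testVisitors; HLogKey#getTablename() and WALEdit#size() are assumed from the main codebase, and the class itself is illustrative only, not part of this commit.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
import org.apache.hadoop.hbase.regionserver.wal.LogEntryVisitor;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.util.Bytes;

/** Counts KeyValues appended per table; register via HLog#addLogEntryVisitor. */
class PerTableEditCounter implements LogEntryVisitor {
  private final ConcurrentHashMap<String, AtomicLong> counts =
      new ConcurrentHashMap<String, AtomicLong>();

  @Override
  public void visitLogEntryBeforeWrite(HRegionInfo info, HLogKey logKey,
                                       WALEdit logEdit) {
    String table = Bytes.toString(logKey.getTablename());
    AtomicLong count = counts.get(table);
    if (count == null) {
      counts.putIfAbsent(table, new AtomicLong(0));
      count = counts.get(table);
    }
    count.addAndGet(logEdit.size());
  }

  /** @return number of KeyValues seen so far for the given table */
  long countFor(String table) {
    AtomicLong count = counts.get(table);
    return count == null ? 0L : count.get();
  }
}

It would be registered and removed exactly as the test does: log.addLogEntryVisitor(counter) before appending and log.removeLogEntryVisitor(counter) afterwards.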

Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogSplit.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogSplit.java?rev=1176177&r1=1176176&r2=1176177&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogSplit.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogSplit.java Tue Sep 27 02:41:56 2011
@@ -25,7 +25,6 @@ import static org.junit.Assert.assertTru
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
-import java.lang.reflect.Field;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
@@ -39,15 +38,12 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Threads;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.ipc.RemoteException;
 import org.junit.After;
 import org.junit.AfterClass;
@@ -125,17 +121,7 @@ public class TestHLogSplit {
     Collections.addAll(regions, "bbb", "ccc");
     InstrumentedSequenceFileLogWriter.activateFailure = false;
     // Set the soft lease for hdfs to be down from default of 5 minutes or so.
-    // TODO: If 0.20 hadoop do one thing, if 0.21 hadoop do another.
-    // Not available in 0.20 hdfs
-    // TEST_UTIL.getDFSCluster().getNamesystem().leaseManager.
-    //  setLeasePeriod(100, 50000);
-    // Use reflection to get at the 0.20 version of above.
-    MiniDFSCluster dfsCluster = TEST_UTIL.getDFSCluster();
-    //   private NameNode nameNode;
-    Field field = dfsCluster.getClass().getDeclaredField("nameNode");
-    field.setAccessible(true);
-    NameNode nn = (NameNode)field.get(dfsCluster);
-    nn.namesystem.leaseManager.setLeasePeriod(100, 50000);
+    TEST_UTIL.setNameNodeNameSystemLeasePeriod(100, 50000);
   }
 
   @After
@@ -628,7 +614,7 @@ public class TestHLogSplit {
     return new Path(HRegion.getRegionDir(HTableDescriptor
             .getTableDir(rootdir, table),
             HRegionInfo.encodeRegionName(region.getBytes())),
-            HConstants.HREGION_OLDLOGFILE_NAME);
+            HLog.RECOVERED_EDITS);
   }
 
   private void corruptHLog(Path path, Corruptions corruption, boolean close,
@@ -736,8 +722,8 @@ public class TestHLogSplit {
     FileStatus[] f2 = fs.listStatus(p2);
 
     for (int i=0; i<f1.length; i++) {
-      if (!logsAreEqual(new Path(f1[i].getPath(), HConstants.HREGION_OLDLOGFILE_NAME),
-              new Path(f2[i].getPath(), HConstants.HREGION_OLDLOGFILE_NAME))) {
+      if (!logsAreEqual(new Path(f1[i].getPath(), HLog.RECOVERED_EDITS),
+              new Path(f2[i].getPath(), HLog.RECOVERED_EDITS))) {
         return -1;
       }
     }
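
As a side note, a short sketch (not from this commit; the regiondir variable is hypothetical, fs/conf are assumed to be set up as elsewhere in this test class) of reading back one of the per-region recovered-edits files that the HLog.RECOVERED_EDITS constant now names, using the same HLog.Reader API these tests rely on:

// Assumes the enclosing method declares throws IOException.
Path recoveredEdits = new Path(regiondir, HLog.RECOVERED_EDITS);  // regiondir is hypothetical
HLog.Reader reader = HLog.getReader(fs, recoveredEdits, conf);
try {
  HLog.Entry entry;
  while ((entry = reader.next()) != null) {
    // Each entry pairs the HLogKey with the WALEdit split out for this region.
    System.out.println(entry.getKey() + ": " + entry.getEdit().size() + " KeyValue(s)");
  }
} finally {
  reader.close();
}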

Added: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java?rev=1176177&view=auto
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java (added)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java Tue Sep 27 02:41:56 2011
@@ -0,0 +1,432 @@
+/**
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver.wal;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.io.hfile.HFile;
+import org.apache.hadoop.hbase.regionserver.FlushRequester;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdge;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * Test replay of edits out of a WAL split.
+ */
+public class TestWALReplay {
+  public static final Log LOG = LogFactory.getLog(TestWALReplay.class);
+  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  private final EnvironmentEdge ee = EnvironmentEdgeManager.getDelegate();
+  private Path hbaseRootDir = null;
+  private Path oldLogDir;
+  private Path logDir;
+  private FileSystem fs;
+  private Configuration conf;
+
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    Configuration conf = TEST_UTIL.getConfiguration();
+    conf.setBoolean("dfs.support.append", true);
+    // The below config supported by 0.20-append and CDH3b2
+    conf.setInt("dfs.client.block.recovery.retries", 2);
+    conf.setInt("hbase.regionserver.flushlogentries", 1);
+    TEST_UTIL.startMiniDFSCluster(3);
+    TEST_UTIL.setNameNodeNameSystemLeasePeriod(100, 10000);
+    Path hbaseRootDir =
+      TEST_UTIL.getDFSCluster().getFileSystem().makeQualified(new Path("/hbase"));
+    LOG.info("hbase.rootdir=" + hbaseRootDir);
+    conf.set(HConstants.HBASE_DIR, hbaseRootDir.toString());
+  }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    TEST_UTIL.shutdownMiniDFSCluster();
+  }
+
+  @Before
+  public void setUp() throws Exception {
+    this.conf = HBaseConfiguration.create(TEST_UTIL.getConfiguration());
+    this.fs = TEST_UTIL.getDFSCluster().getFileSystem();
+    this.hbaseRootDir = new Path(this.conf.get(HConstants.HBASE_DIR));
+    this.oldLogDir = new Path(this.hbaseRootDir, HConstants.HREGION_OLDLOGDIR_NAME);
+    this.logDir = new Path(this.hbaseRootDir, HConstants.HREGION_LOGDIR_NAME);
+    if (TEST_UTIL.getDFSCluster().getFileSystem().exists(this.hbaseRootDir)) {
+      TEST_UTIL.getDFSCluster().getFileSystem().delete(this.hbaseRootDir, true);
+    }
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    TEST_UTIL.getDFSCluster().getFileSystem().delete(this.hbaseRootDir, true);
+  }
+
+  /*
+   * @param p Directory to cleanup
+   */
+  private void deleteDir(final Path p) throws IOException {
+    if (this.fs.exists(p)) {
+      if (!this.fs.delete(p, true)) {
+        throw new IOException("Failed remove of " + p);
+      }
+    }
+  }
+
+  /**
+   * Test case of HRegion that is only made out of bulk loaded files.  Assert
+   * that we don't 'crash'.
+   * @throws IOException
+   * @throws IllegalAccessException
+   * @throws NoSuchFieldException
+   * @throws IllegalArgumentException
+   * @throws SecurityException
+   */
+  @Test
+  public void testRegionMadeOfBulkLoadedFilesOnly()
+  throws IOException, SecurityException, IllegalArgumentException,
+      NoSuchFieldException, IllegalAccessException {
+    final String tableNameStr = "testReplayEditsWrittenViaHRegion";
+    HRegionInfo hri = createBasic3FamilyHRegionInfo(tableNameStr);
+    Path basedir = new Path(this.hbaseRootDir, tableNameStr);
+    deleteDir(basedir);
+    HLog wal = createWAL(this.conf);
+    HRegion region = HRegion.openHRegion(hri, basedir, wal, this.conf);
+    Path f =  new Path(basedir, "hfile");
+    HFile.Writer writer = new HFile.Writer(this.fs, f);
+    byte [] family = hri.getTableDesc().getFamilies().iterator().next().getName();
+    byte [] row = Bytes.toBytes(tableNameStr);
+    writer.append(new KeyValue(row, family, family, row));
+    writer.close();
+    region.bulkLoadHFile(f.toString(), family);
+    // Add an edit so there is something in the WAL
+    region.put((new Put(row)).add(family, family, family));
+    wal.sync();
+
+    // Now 'crash' the region by stealing its wal
+    Configuration newConf = HBaseTestingUtility.setDifferentUser(this.conf,
+        tableNameStr);
+    runWALSplit(newConf);
+    HLog wal2 = createWAL(newConf);
+    HRegion region2 = new HRegion(basedir, wal2, FileSystem.get(newConf),
+      newConf, hri, null);
+    long seqid2 = region2.initialize();
+    assertTrue(seqid2 > -1);
+
+    // I can't close wal1.  It's been appropriated when we split.
+    region2.close();
+    wal2.closeAndDelete();
+  }
+
+  /**
+   * Test writing edits into an HRegion, closing it, splitting logs, opening
+   * Region again.  Verify seqids.
+   * @throws IOException
+   * @throws IllegalAccessException
+   * @throws NoSuchFieldException
+   * @throws IllegalArgumentException
+   * @throws SecurityException
+   */
+  @Test
+  public void testReplayEditsWrittenViaHRegion()
+  throws IOException, SecurityException, IllegalArgumentException,
+      NoSuchFieldException, IllegalAccessException {
+    final String tableNameStr = "testReplayEditsWrittenViaHRegion";
+    HRegionInfo hri = createBasic3FamilyHRegionInfo(tableNameStr);
+    Path basedir = new Path(this.hbaseRootDir, tableNameStr);
+    deleteDir(basedir);
+    final byte[] rowName = Bytes.toBytes(tableNameStr);
+    final int countPerFamily = 10;
+
+    // Write countPerFamily edits into the three families.  Do a flush on one
+    // of the families during the load of edits so its seqid is not the same as
+    // the others', to test we do the right thing when seqids differ.
+    HLog wal = createWAL(this.conf);
+    HRegion region = new HRegion(basedir, wal, this.fs, this.conf, hri, null);
+    long seqid = region.initialize();
+    // HRegionServer usually does this. It knows the largest seqid across all regions.
+    wal.setSequenceNumber(seqid);
+    boolean first = true;
+    for (HColumnDescriptor hcd: hri.getTableDesc().getFamilies()) {
+      addRegionEdits(rowName, hcd.getName(), countPerFamily, this.ee, region, "x");
+      if (first ) {
+        // Flush after the first family so at least one family has a different seqid from the rest.
+        region.flushcache();
+        first = false;
+      }
+    }
+    // Now assert edits made it in.
+    Get g = new Get(rowName);
+    Result result = region.get(g, null);
+    assertEquals(countPerFamily * hri.getTableDesc().getFamilies().size(),
+      result.size());
+    // Now close the region, split the log, reopen the region and assert that
+    // replay of log has no effect, that our seqids are calculated correctly so
+    // all edits in logs are seen as 'stale'/old.
+    region.close();
+    wal.close();
+    runWALSplit(this.conf);
+    HLog wal2 = createWAL(this.conf);
+    HRegion region2 = new HRegion(basedir, wal2, this.fs, this.conf, hri, null) {
+      @Override
+      protected void restoreEdit(KeyValue kv) throws IOException {
+        super.restoreEdit(kv);
+        throw new RuntimeException("Called when it should not have been!");
+      }
+    };
+    long seqid2 = region2.initialize();
+    // HRegionServer usually does this. It knows the largest seqid across all regions.
+    wal2.setSequenceNumber(seqid2);
+    assertTrue(seqid + result.size() < seqid2);
+
+    // Next test.  Add more edits, then 'crash' this region by stealing its wal
+    // out from under it and assert that replay of the log adds the edits back
+    // correctly when region is opened again.
+    for (HColumnDescriptor hcd: hri.getTableDesc().getFamilies()) {
+      addRegionEdits(rowName, hcd.getName(), countPerFamily, this.ee, region2, "y");
+    }
+    // Get count of edits.
+    Result result2 = region2.get(g, null);
+    assertEquals(2 * result.size(), result2.size());
+    wal2.sync();
+    // Set down maximum recovery so the dfsclient doesn't linger retrying something
+    // long gone.
+    HBaseTestingUtility.setMaxRecoveryErrorCount(wal2.getOutputStream(), 1);
+    Configuration newConf = HBaseTestingUtility.setDifferentUser(this.conf,
+      tableNameStr);
+    runWALSplit(newConf);
+    FileSystem newFS = FileSystem.get(newConf);
+    // Make a new wal for new region open.
+    HLog wal3 = createWAL(newConf);
+    final AtomicInteger countOfRestoredEdits = new AtomicInteger(0);
+    HRegion region3 = new HRegion(basedir, wal3, newFS, newConf, hri, null) {
+      @Override
+      protected void restoreEdit(KeyValue kv) throws IOException {
+        super.restoreEdit(kv);
+        countOfRestoredEdits.incrementAndGet();
+      }
+    };
+    long seqid3 = region3.initialize();
+    // HRegionServer usually does this. It knows the largest seqid across all regions.
+    wal3.setSequenceNumber(seqid3);
+    Result result3 = region3.get(g, null);
+    // Assert that count of cells is same as before crash.
+    assertEquals(result2.size(), result3.size());
+    assertEquals(hri.getTableDesc().getFamilies().size() * countPerFamily,
+      countOfRestoredEdits.get());
+
+    // I can't close wal1.  It's been appropriated when we split.
+    region3.close();
+    wal3.closeAndDelete();
+  }
+
+  /**
+   * Create an HRegion with the result of a HLog split and test we only see the
+   * good edits
+   * @throws Exception
+   */
+  @Test
+  public void testReplayEditsWrittenIntoWAL() throws Exception {
+    final String tableNameStr = "testReplayEditsWrittenIntoWAL";
+    HRegionInfo hri = createBasic3FamilyHRegionInfo(tableNameStr);
+    Path basedir = new Path(hbaseRootDir, tableNameStr);
+    deleteDir(basedir);
+    HLog wal = createWAL(this.conf);
+    final byte[] tableName = Bytes.toBytes(tableNameStr);
+    final byte[] rowName = tableName;
+    final byte[] regionName = hri.getRegionName();
+
+    // Add 1k to each family.
+    final int countPerFamily = 1000;
+    for (HColumnDescriptor hcd: hri.getTableDesc().getFamilies()) {
+      addWALEdits(tableName, hri, rowName, hcd.getName(), countPerFamily, ee, wal);
+    }
+
+    // Add a cache flush, shouldn't have any effect
+    long logSeqId = wal.startCacheFlush();
+    wal.completeCacheFlush(regionName, tableName, logSeqId, hri.isMetaRegion());
+
+    // Add an edit to another family, should be skipped.
+    WALEdit edit = new WALEdit();
+    long now = ee.currentTimeMillis();
+    edit.add(new KeyValue(rowName, Bytes.toBytes("another family"), rowName,
+      now, rowName));
+    wal.append(hri, tableName, edit, now);
+
+    // Delete the c family to verify deletes make it over.
+    edit = new WALEdit();
+    now = ee.currentTimeMillis();
+    edit.add(new KeyValue(rowName, Bytes.toBytes("c"), null, now,
+      KeyValue.Type.DeleteFamily));
+    wal.append(hri, tableName, edit, now);
+
+    // Sync.
+    wal.sync();
+    // Set down maximum recovery so the dfsclient doesn't linger retrying something
+    // long gone.
+    HBaseTestingUtility.setMaxRecoveryErrorCount(wal.getOutputStream(), 1);
+
+    // Make a new conf and a new fs for the splitter to run on so we can take
+    // over old wal.
+    Configuration newConf = HBaseTestingUtility.setDifferentUser(this.conf,
+      ".replay.wal.secondtime");
+    runWALSplit(newConf);
+    FileSystem newFS = FileSystem.get(newConf);
+    // 100k seems to make for about 4 flushes during HRegion#initialize.
+    newConf.setInt("hbase.hregion.memstore.flush.size", 1024 * 100);
+    // Make a new wal for new region.
+    HLog newWal = createWAL(newConf);
+    try {
+      TestFlusher flusher = new TestFlusher();
+      final HRegion region = new HRegion(basedir, newWal, newFS, newConf, hri,
+          flusher);
+      flusher.r = region;
+      long seqid = region.initialize();
+      // Assert we flushed.
+      assertTrue(flusher.count > 0);
+      assertTrue(seqid > wal.getSequenceNumber());
+
+      Get get = new Get(rowName);
+      Result result = region.get(get, -1);
+      // Make sure we only see the good edits
+      assertEquals(countPerFamily * (hri.getTableDesc().getFamilies().size() - 1),
+        result.size());
+      region.close();
+    } finally {
+      newWal.closeAndDelete();
+    }
+  }
+
+  // Flusher used in this test.  Keep count of how often we are called and
+  // actually run the flush inside here.
+  class TestFlusher implements FlushRequester {
+    private int count = 0;
+    private HRegion r;
+
+    @Override
+    public void request(HRegion region) {
+      count++;
+      try {
+        r.flushcache();
+      } catch (IOException e) {
+        throw new RuntimeException("Exception flushing", e);
+      }
+    }
+  }
+
+  private void addWALEdits (final byte [] tableName, final HRegionInfo hri,
+      final byte [] rowName, final byte [] family,
+      final int count, EnvironmentEdge ee, final HLog wal)
+  throws IOException {
+    String familyStr = Bytes.toString(family);
+    for (int j = 0; j < count; j++) {
+      byte[] qualifierBytes = Bytes.toBytes(Integer.toString(j));
+      byte[] columnBytes = Bytes.toBytes(familyStr + ":" + Integer.toString(j));
+      WALEdit edit = new WALEdit();
+      edit.add(new KeyValue(rowName, family, qualifierBytes,
+        ee.currentTimeMillis(), columnBytes));
+      wal.append(hri, tableName, edit, ee.currentTimeMillis());
+    }
+  }
+
+  private void addRegionEdits (final byte [] rowName, final byte [] family,
+      final int count, EnvironmentEdge ee, final HRegion r,
+      final String qualifierPrefix)
+  throws IOException {
+    for (int j = 0; j < count; j++) {
+      byte[] qualifier = Bytes.toBytes(qualifierPrefix + Integer.toString(j));
+      Put p = new Put(rowName);
+      p.add(family, qualifier, ee.currentTimeMillis(), rowName);
+      r.put(p);
+    }
+  }
+
+  /*
+   * Creates an HRI around an HTD that has <code>tableName</code> and three
+   * column families named 'a','b', and 'c'.
+   * @param tableName Name of table to use when we create HTableDescriptor.
+   */
+  private HRegionInfo createBasic3FamilyHRegionInfo(final String tableName) {
+    HTableDescriptor htd = new HTableDescriptor(tableName);
+    HColumnDescriptor a = new HColumnDescriptor(Bytes.toBytes("a"));
+    htd.addFamily(a);
+    HColumnDescriptor b = new HColumnDescriptor(Bytes.toBytes("b"));
+    htd.addFamily(b);
+    HColumnDescriptor c = new HColumnDescriptor(Bytes.toBytes("c"));
+    htd.addFamily(c);
+    return new HRegionInfo(htd, null, null, false);
+  }
+
+
+  /*
+   * Run the split.  Verify only single split file made.
+   * @param c
+   * @return The single split file made
+   * @throws IOException
+   */
+  private Path runWALSplit(final Configuration c) throws IOException {
+    FileSystem fs = FileSystem.get(c);
+    List<Path> splits = HLog.splitLog(this.hbaseRootDir, this.logDir,
+      this.oldLogDir, fs, c);
+    // Split should generate only 1 file since there's only 1 region
+    assertEquals(1, splits.size());
+    // Make sure the file exists
+    assertTrue(fs.exists(splits.get(0)));
+    LOG.info("Split file=" + splits.get(0));
+    return splits.get(0);
+  }
+
+  /*
+   * @param c
+   * @return WAL with retries set down from 5 to 1 only.
+   * @throws IOException
+   */
+  private HLog createWAL(final Configuration c) throws IOException {
+    HLog wal = new HLog(FileSystem.get(c), logDir, oldLogDir, c, null);
+    // Set down maximum recovery so the dfsclient doesn't linger retrying something
+    // long gone.
+    HBaseTestingUtility.setMaxRecoveryErrorCount(wal.getOutputStream(), 1);
+    return wal;
+  }
+}
\ No newline at end of file
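
TestWALReplay stamps every edit through the EnvironmentEdge obtained from EnvironmentEdgeManager.getDelegate(). A small sketch of pinning that clock for fully deterministic WAL timestamps; it assumes EnvironmentEdgeManager exposes injectEdge()/reset() as later HBase versions do, which should be verified on this branch before use.

import org.apache.hadoop.hbase.util.EnvironmentEdge;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

/** An EnvironmentEdge that always reports the same time (sketch only). */
class FixedTimeEdge implements EnvironmentEdge {
  private final long fixedTime;

  FixedTimeEdge(long fixedTime) {
    this.fixedTime = fixedTime;
  }

  @Override
  public long currentTimeMillis() {
    return fixedTime;
  }
}

// Usage in a test (assuming injectEdge()/reset() exist on this branch):
//   EnvironmentEdgeManager.injectEdge(new FixedTimeEdge(1316900000000L));
//   ... addWALEdits()/addRegionEdits() now stamp every KeyValue identically ...
//   EnvironmentEdgeManager.reset();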

Added: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/replication/ReplicationSourceDummy.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/replication/ReplicationSourceDummy.java?rev=1176177&view=auto
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/replication/ReplicationSourceDummy.java (added)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/replication/ReplicationSourceDummy.java Tue Sep 27 02:41:56 2011
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceInterface;
+import org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager;
+
+import java.io.IOException;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+/**
+ * Source that does nothing at all, helpful to test ReplicationSourceManager
+ */
+public class ReplicationSourceDummy implements ReplicationSourceInterface {
+
+  ReplicationSourceManager manager;
+  String peerClusterId;
+  Path currentPath;
+
+  @Override
+  public void init(Configuration conf, FileSystem fs,
+                   ReplicationSourceManager manager, AtomicBoolean stopper,
+                   AtomicBoolean replicating, String peerClusterId)
+      throws IOException {
+    this.manager = manager;
+    this.peerClusterId = peerClusterId;
+  }
+
+  @Override
+  public void enqueueLog(Path log) {
+    this.currentPath = log;
+  }
+
+  @Override
+  public Path getCurrentPath() {
+    return this.currentPath;
+  }
+
+  @Override
+  public void startup() {
+
+  }
+
+  @Override
+  public void terminate() {
+
+  }
+
+  @Override
+  public String getPeerClusterZnode() {
+    return peerClusterId;
+  }
+
+  @Override
+  public void logArchived(Path oldPath, Path newPath) {
+  }
+}
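
For orientation, a sketch of how a test can ask the manager to instantiate this dummy instead of the real source. The configuration key below is an assumption based on how ReplicationSourceManager conventionally resolves its source class; confirm it against this branch before relying on it.

Configuration conf = HBaseConfiguration.create();
// Hypothetical wiring: the key name is assumed, not taken from this commit.
conf.set("replication.replicationsource.implementation",
    ReplicationSourceDummy.class.getCanonicalName());
// With this set, ReplicationSourceManager would instantiate the dummy and call
// init(...)/enqueueLog(...) on it, so tests can observe queued log paths via
// getCurrentPath() without any edits actually being shipped.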

Added: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/replication/TestReplication.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/replication/TestReplication.java?rev=1176177&view=auto
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/replication/TestReplication.java (added)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/replication/TestReplication.java Tue Sep 27 02:41:56 2011
@@ -0,0 +1,478 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.EmptyWatcher;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MiniZooKeeperCluster;
+import org.apache.hadoop.hbase.UnknownScannerException;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.zookeeper.ZooKeeperWrapper;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+public class TestReplication {
+
+  private static final Log LOG = LogFactory.getLog(TestReplication.class);
+
+  private static Configuration conf1;
+  private static Configuration conf2;
+
+  private static ZooKeeperWrapper zkw1;
+  private static ZooKeeperWrapper zkw2;
+
+  private static HTable htable1;
+  private static HTable htable2;
+
+  private static HBaseTestingUtility utility1;
+  private static HBaseTestingUtility utility2;
+  private static final int NB_ROWS_IN_BATCH = 100;
+  private static final long SLEEP_TIME = 500;
+  private static final int NB_RETRIES = 10;
+
+  private static final byte[] tableName = Bytes.toBytes("test");
+  private static final byte[] famName = Bytes.toBytes("f");
+  private static final byte[] row = Bytes.toBytes("row");
+  private static final byte[] noRepfamName = Bytes.toBytes("norep");
+
+  /**
+   * @throws java.lang.Exception
+   */
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    conf1 = HBaseConfiguration.create();
+    conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
+    // smaller block size and capacity to trigger more operations
+    // and test them
+    conf1.setInt("hbase.regionserver.hlog.blocksize", 1024*20);
+    conf1.setInt("replication.source.nb.capacity", 5);
+    conf1.setLong("replication.source.sleepforretries", 100);
+    conf1.setInt("hbase.regionserver.maxlogs", 10);
+    conf1.setLong("hbase.master.logcleaner.ttl", 10);
+    conf1.setLong("hbase.client.retries.number", 4);
+    conf1.setBoolean(HConstants.REPLICATION_ENABLE_KEY, true);
+    conf1.setBoolean("dfs.support.append", true);
+    conf1.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100);
+
+    utility1 = new HBaseTestingUtility(conf1);
+    utility1.startMiniZKCluster();
+    MiniZooKeeperCluster miniZK = utility1.getZkCluster();
+    zkw1 = ZooKeeperWrapper.createInstance(conf1, "cluster1");
+    zkw1.writeZNode("/1", "replication", "");
+    zkw1.writeZNode("/1/replication", "master",
+        conf1.get(HConstants.ZOOKEEPER_QUORUM)+":" +
+            conf1.get("hbase.zookeeper.property.clientPort")+":/1");
+    setIsReplication(true);
+
+    LOG.info("Setup first Zk");
+
+    conf2 = HBaseConfiguration.create();
+    conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
+    conf2.setInt("hbase.client.retries.number", 6);
+    conf2.setBoolean(HConstants.REPLICATION_ENABLE_KEY, true);
+    conf2.setBoolean("dfs.support.append", true);
+
+    utility2 = new HBaseTestingUtility(conf2);
+    utility2.setZkCluster(miniZK);
+    zkw2 = ZooKeeperWrapper.createInstance(conf2, "cluster2");
+    zkw2.writeZNode("/2", "replication", "");
+    zkw2.writeZNode("/2/replication", "master",
+        conf1.get(HConstants.ZOOKEEPER_QUORUM)+":" +
+            conf1.get("hbase.zookeeper.property.clientPort")+":/1");
+
+    zkw1.writeZNode("/1/replication/peers", "1",
+        conf2.get(HConstants.ZOOKEEPER_QUORUM)+":" +
+            conf2.get("hbase.zookeeper.property.clientPort")+":/2");
+
+    LOG.info("Setup second Zk");
+
+    utility1.startMiniCluster(2);
+    utility2.startMiniCluster(2);
+
+    HTableDescriptor table = new HTableDescriptor(tableName);
+    table.setDeferredLogFlush(false);
+    HColumnDescriptor fam = new HColumnDescriptor(famName);
+    fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
+    table.addFamily(fam);
+    fam = new HColumnDescriptor(noRepfamName);
+    table.addFamily(fam);
+    HBaseAdmin admin1 = new HBaseAdmin(conf1);
+    HBaseAdmin admin2 = new HBaseAdmin(conf2);
+    admin1.createTable(table);
+    admin2.createTable(table);
+
+    htable1 = new HTable(conf1, tableName);
+    htable1.setWriteBufferSize(1024*5);
+    htable2 = new HTable(conf2, tableName);
+  }
+
+  private static void setIsReplication(boolean rep) throws Exception {
+    LOG.info("Set rep " + rep);
+    zkw1.writeZNode("/1/replication", "state", Boolean.toString(rep));
+    // Takes some ms for ZK to fire the watcher
+    Thread.sleep(SLEEP_TIME);
+  }
+
+  /**
+   * @throws java.lang.Exception
+   */
+  @Before
+  public void setUp() throws Exception {
+    setIsReplication(false);
+    utility1.truncateTable(tableName);
+    utility2.truncateTable(tableName);
+    // If test is flaky, set that sleep higher
+    Thread.sleep(SLEEP_TIME*8);
+    setIsReplication(true);
+  }
+
+  /**
+   * @throws java.lang.Exception
+   */
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    utility2.shutdownMiniCluster();
+    utility1.shutdownMiniCluster();
+  }
+
+  /**
+   * Add a row, check it's replicated, delete it, check it's gone
+   * @throws Exception
+   */
+  @Test
+  public void testSimplePutDelete() throws Exception {
+    LOG.info("testSimplePutDelete");
+    Put put = new Put(row);
+    put.add(famName, row, row);
+
+    htable1 = new HTable(conf1, tableName);
+    htable1.put(put);
+
+    HTable table2 = new HTable(conf2, tableName);
+    Get get = new Get(row);
+    for (int i = 0; i < NB_RETRIES; i++) {
+      if (i==NB_RETRIES-1) {
+        fail("Waited too much time for put replication");
+      }
+      Result res = table2.get(get);
+      if (res.size() == 0) {
+        LOG.info("Row not available");
+        Thread.sleep(SLEEP_TIME);
+      } else {
+        assertArrayEquals(res.value(), row);
+        break;
+      }
+    }
+
+    Delete del = new Delete(row);
+    htable1.delete(del);
+
+    table2 = new HTable(conf2, tableName);
+    get = new Get(row);
+    for (int i = 0; i < NB_RETRIES; i++) {
+      if (i==NB_RETRIES-1) {
+        fail("Waited too much time for del replication");
+      }
+      Result res = table2.get(get);
+      if (res.size() >= 1) {
+        LOG.info("Row not deleted");
+        Thread.sleep(SLEEP_TIME);
+      } else {
+        break;
+      }
+    }
+  }
+
+  /**
+   * Try a small batch upload using the write buffer, check it's replicated
+   * @throws Exception
+   */
+  @Test
+  public void testSmallBatch() throws Exception {
+    LOG.info("testSmallBatch");
+    Put put;
+    // normal Batch tests
+    htable1.setAutoFlush(false);
+    for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
+      put = new Put(Bytes.toBytes(i));
+      put.add(famName, row, row);
+      htable1.put(put);
+    }
+    htable1.flushCommits();
+
+    Scan scan = new Scan();
+
+    ResultScanner scanner1 = htable1.getScanner(scan);
+    Result[] res1 = scanner1.next(NB_ROWS_IN_BATCH);
+    scanner1.close();
+    assertEquals(NB_ROWS_IN_BATCH, res1.length);
+
+    for (int i = 0; i < NB_RETRIES; i++) {
+      if (i==NB_RETRIES-1) {
+        fail("Waited too much time for normal batch replication");
+      }
+      ResultScanner scanner = htable2.getScanner(scan);
+      Result[] res = scanner.next(NB_ROWS_IN_BATCH);
+      scanner.close();
+      if (res.length != NB_ROWS_IN_BATCH) {
+        LOG.info("Only got " + res.length + " rows");
+        Thread.sleep(SLEEP_TIME);
+      } else {
+        break;
+      }
+    }
+
+    htable1.setAutoFlush(true);
+
+  }
+
+  /**
+   * Test stopping replication: insert rows, make sure nothing is replicated,
+   * then re-enable replication and verify the edits flow again
+   * @throws Exception
+   */
+  @Test
+  public void testStartStop() throws Exception {
+
+    // Test stopping replication
+    setIsReplication(false);
+
+    Put put = new Put(Bytes.toBytes("stop start"));
+    put.add(famName, row, row);
+    htable1.put(put);
+
+    Get get = new Get(Bytes.toBytes("stop start"));
+    for (int i = 0; i < NB_RETRIES; i++) {
+      if (i==NB_RETRIES-1) {
+        break;
+      }
+      Result res = htable2.get(get);
+      if(res.size() >= 1) {
+        fail("Replication wasn't stopped");
+
+      } else {
+        LOG.info("Row not replicated, let's wait a bit more...");
+        Thread.sleep(SLEEP_TIME);
+      }
+    }
+
+    // Test restart replication
+    setIsReplication(true);
+
+    htable1.put(put);
+
+    for (int i = 0; i < NB_RETRIES; i++) {
+      if (i==NB_RETRIES-1) {
+        fail("Waited too much time for put replication");
+      }
+      Result res = htable2.get(get);
+      if(res.size() == 0) {
+        LOG.info("Row not available");
+        Thread.sleep(SLEEP_TIME);
+      } else {
+        assertArrayEquals(res.value(), row);
+        break;
+      }
+    }
+
+    put = new Put(Bytes.toBytes("do not rep"));
+    put.add(noRepfamName, row, row);
+    htable1.put(put);
+
+    get = new Get(Bytes.toBytes("do not rep"));
+    for (int i = 0; i < NB_RETRIES; i++) {
+      if (i == NB_RETRIES-1) {
+        break;
+      }
+      Result res = htable2.get(get);
+      if (res.size() >= 1) {
+        fail("Not supposed to be replicated");
+      } else {
+        LOG.info("Row not replicated, let's wait a bit more...");
+        Thread.sleep(SLEEP_TIME);
+      }
+    }
+
+  }
+
+  /**
+   * Do a more intense version of testSmallBatch, one that will trigger
+   * hlog rolling and other non-trivial code paths
+   * @throws Exception
+   */
+  @Test
+  public void loadTesting() throws Exception {
+    htable1.setWriteBufferSize(1024);
+    htable1.setAutoFlush(false);
+    for (int i = 0; i < NB_ROWS_IN_BATCH *10; i++) {
+      Put put = new Put(Bytes.toBytes(i));
+      put.add(famName, row, row);
+      htable1.put(put);
+    }
+    htable1.flushCommits();
+
+    Scan scan = new Scan();
+
+    ResultScanner scanner = htable1.getScanner(scan);
+    Result[] res = scanner.next(NB_ROWS_IN_BATCH * 100);
+    scanner.close();
+
+    assertEquals(NB_ROWS_IN_BATCH *10, res.length);
+
+    scan = new Scan();
+
+    for (int i = 0; i < NB_RETRIES; i++) {
+
+      scanner = htable2.getScanner(scan);
+      res = scanner.next(NB_ROWS_IN_BATCH * 100);
+      scanner.close();
+      if (res.length != NB_ROWS_IN_BATCH *10) {
+        if (i == NB_RETRIES-1) {
+          int lastRow = -1;
+          for (Result result : res) {
+            int currentRow = Bytes.toInt(result.getRow());
+            for (int row = lastRow+1; row < currentRow; row++) {
+              LOG.error("Row missing: " + row);
+            }
+            lastRow = currentRow;
+          }
+          LOG.error("Last row: " + lastRow);
+          fail("Waited too much time for normal batch replication, "
+              + res.length + " instead of " + NB_ROWS_IN_BATCH *10);
+        } else {
+          LOG.info("Only got " + res.length + " rows");
+          Thread.sleep(SLEEP_TIME);
+        }
+      } else {
+        break;
+      }
+    }
+  }
+
+  /**
+   * Load a multi-region table over 2 region servers and kill a source during
+   * the upload. The failover happens internally.
+   * @throws Exception
+   */
+  @Test
+  public void queueFailover() throws Exception {
+    utility1.createMultiRegions(htable1, famName);
+
+    // killing the RS with .META. can result in failed puts until we solve
+    // IO fencing
+    int rsToKill1 =
+        utility1.getHBaseCluster().getServerWithMeta() == 0 ? 1 : 0;
+    int rsToKill2 =
+        utility2.getHBaseCluster().getServerWithMeta() == 0 ? 1 : 0;
+
+    // Takes about 20 secs to run the full loading, kill around the middle
+    Thread killer1 = killARegionServer(utility1, 7500, rsToKill1);
+    Thread killer2 = killARegionServer(utility2, 10000, rsToKill2);
+
+    LOG.info("Start loading table");
+    int initialCount = utility1.loadTable(htable1, famName);
+    LOG.info("Done loading table");
+    killer1.join(5000);
+    killer2.join(5000);
+    LOG.info("Done waiting for threads");
+
+    Result[] res;
+    while (true) {
+      try {
+        Scan scan = new Scan();
+        ResultScanner scanner = htable1.getScanner(scan);
+        res = scanner.next(initialCount);
+        scanner.close();
+        break;
+      } catch (UnknownScannerException ex) {
+        LOG.info("Cluster wasn't ready yet, restarting scanner");
+      }
+    }
+    // Test we actually have all the rows, we may miss some because we
+    // don't have IO fencing.
+    if (res.length != initialCount) {
+      LOG.warn("We lost some rows on the master cluster!");
+      // We don't really expect the other cluster to have more rows
+      initialCount = res.length;
+    }
+
+    Scan scan2 = new Scan();
+
+    int lastCount = 0;
+
+    for (int i = 0; i < NB_RETRIES; i++) {
+      if (i==NB_RETRIES-1) {
+        fail("Waited too much time for queueFailover replication");
+      }
+      ResultScanner scanner2 = htable2.getScanner(scan2);
+      Result[] res2 = scanner2.next(initialCount * 2);
+      scanner2.close();
+      if (res2.length < initialCount) {
+        if (lastCount < res2.length) {
+          i--; // Don't increment timeout if we make progress
+        }
+        lastCount = res2.length;
+        LOG.info("Only got " + lastCount + " rows instead of " +
+            initialCount + " current i=" + i);
+        Thread.sleep(SLEEP_TIME*2);
+      } else {
+        break;
+      }
+    }
+  }
+
+  private static Thread killARegionServer(final HBaseTestingUtility utility,
+                                   final long timeout, final int rs) {
+    Thread killer = new Thread() {
+      public void run() {
+        try {
+          Thread.sleep(timeout);
+          utility.expireRegionServerSession(rs);
+        } catch (Exception e) {
+          LOG.error(e);
+        }
+      }
+    };
+    killer.start();
+    return killer;
+  }
+}
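
The assertions above all follow the same poll-sleep-fail pattern; a small sketch (not part of this commit) of a helper that could factor it out, using only members already defined in this class (NB_RETRIES, SLEEP_TIME, LOG, the statically imported fail):

  /** A condition to poll for; sketch only. */
  private interface Condition {
    boolean holds() throws Exception;
  }

  /** Polls until the condition holds or NB_RETRIES attempts have been used. */
  private static void waitFor(String what, Condition condition) throws Exception {
    for (int i = 0; i < NB_RETRIES; i++) {
      if (condition.holds()) {
        return;
      }
      LOG.info(what + " not there yet, attempt " + i + ", sleeping");
      Thread.sleep(SLEEP_TIME);
    }
    fail("Waited too much time for " + what);
  }

  // e.g. the put-replication check in testSimplePutDelete could become:
  //   waitFor("put replication", new Condition() {
  //     public boolean holds() throws Exception {
  //       return htable2.get(new Get(row)).size() > 0;
  //     }
  //   });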

Added: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSource.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSource.java?rev=1176177&view=auto
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSource.java (added)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSource.java Tue Sep 27 02:41:56 2011
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.regionserver.wal.HLog;
+import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
+import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+
+public class TestReplicationSource {
+
+  private static final Log LOG =
+      LogFactory.getLog(TestReplicationSource.class);
+  private final static HBaseTestingUtility TEST_UTIL =
+      new HBaseTestingUtility();
+  private static FileSystem fs;
+  private static Path oldLogDir;
+  private static Path logDir;
+  private static Configuration conf = HBaseConfiguration.create();
+
+  /**
+   * @throws java.lang.Exception
+   */
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    TEST_UTIL.startMiniDFSCluster(1);
+    fs = TEST_UTIL.getDFSCluster().getFileSystem();
+    oldLogDir = new Path(fs.getHomeDirectory(),
+        HConstants.HREGION_OLDLOGDIR_NAME);
+    logDir = new Path(fs.getHomeDirectory(),
+        HConstants.HREGION_LOGDIR_NAME);
+  }
+
+  /**
+   * Sanity check that we can move logs around while we are reading
+   * from them. Should this test fail, ReplicationSource would have a hard
+   * time reading logs that are being archived.
+   * @throws Exception
+   */
+  @Test
+  public void testLogMoving() throws Exception{
+    Path logPath = new Path(logDir, "log");
+    HLog.Writer writer = HLog.createWriter(fs, logPath, conf);
+    for(int i = 0; i < 3; i++) {
+      byte[] b = Bytes.toBytes(Integer.toString(i));
+      KeyValue kv = new KeyValue(b,b,b);
+      WALEdit edit = new WALEdit();
+      edit.add(kv);
+      HLogKey key = new HLogKey(b, b, 0, 0);
+      writer.append(new HLog.Entry(key, edit));
+      writer.sync();
+    }
+    writer.close();
+
+    HLog.Reader reader = HLog.getReader(fs, logPath, conf);
+    HLog.Entry entry = reader.next();
+    assertNotNull(entry);
+
+    Path oldLogPath = new Path(oldLogDir, "log");
+    fs.rename(logPath, oldLogPath);
+
+    entry = reader.next();
+    assertNotNull(entry);
+
+    entry = reader.next();
+    entry = reader.next();
+
+    assertNull(entry);
+
+  }
+}
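
A related sketch (not in this commit) of the resume pattern a replication source builds on top of this behaviour: remember the reader offset, then reopen from the archived location and seek back to it. It assumes seek()/getPosition() are available on HLog.Reader in this branch, as they are in later versions.

// Sketch only; the enclosing method is assumed to declare throws IOException.
HLog.Reader reader = HLog.getReader(fs, logPath, conf);
reader.next();                          // consume the first entry
long resumeAt = reader.getPosition();   // remember how far we got
reader.close();

fs.rename(logPath, oldLogPath);         // the log gets archived underneath us

reader = HLog.getReader(fs, oldLogPath, conf);
reader.seek(resumeAt);                  // pick up where the old reader stopped
HLog.Entry entry = reader.next();       // next unread entry, or null at EOF
reader.close();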

Added: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSink.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSink.java?rev=1176177&view=auto
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSink.java (added)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSink.java Tue Sep 27 02:41:56 2011
@@ -0,0 +1,248 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication.regionserver;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.regionserver.wal.HLog;
+import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
+import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+public class TestReplicationSink {
+
+  private static final Log LOG =
+      LogFactory.getLog(TestReplicationSink.class);
+
+  private static final int BATCH_SIZE = 10;
+
+  private static final long SLEEP_TIME = 500;
+
+  private final static Configuration conf = HBaseConfiguration.create();
+
+  private final static HBaseTestingUtility TEST_UTIL =
+      new HBaseTestingUtility();
+
+  private static ReplicationSink SINK;
+
+  private static final byte[] TABLE_NAME1 =
+      Bytes.toBytes("table1");
+  private static final byte[] TABLE_NAME2 =
+      Bytes.toBytes("table2");
+
+  private static final byte[] FAM_NAME1 = Bytes.toBytes("info1");
+  private static final byte[] FAM_NAME2 = Bytes.toBytes("info2");
+
+  private static final AtomicBoolean STOPPER = new AtomicBoolean(false);
+
+  private static HTable table1;
+
+  private static HTable table2;
+
+   /**
+   * @throws java.lang.Exception
+   */
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    TEST_UTIL.getConfiguration().setBoolean("dfs.support.append", true);
+    TEST_UTIL.getConfiguration().setBoolean(
+        HConstants.REPLICATION_ENABLE_KEY, true);
+    TEST_UTIL.startMiniCluster(3);
+    conf.setBoolean("dfs.support.append", true);
+    SINK = new ReplicationSink(conf,STOPPER);
+    table1 = TEST_UTIL.createTable(TABLE_NAME1, FAM_NAME1);
+    table2 = TEST_UTIL.createTable(TABLE_NAME2, FAM_NAME2);
+  }
+
+  /**
+   * @throws java.lang.Exception
+   */
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    STOPPER.set(true);
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  /**
+   * @throws java.lang.Exception
+   */
+  @Before
+  public void setUp() throws Exception {
+    table1 = TEST_UTIL.truncateTable(TABLE_NAME1);
+    table2 = TEST_UTIL.truncateTable(TABLE_NAME2);
+    Thread.sleep(SLEEP_TIME);
+  }
+
+  /**
+   * Insert a whole batch of entries
+   * @throws Exception
+   */
+  @Test
+  public void testBatchSink() throws Exception {
+    HLog.Entry[] entries = new HLog.Entry[BATCH_SIZE];
+    for(int i = 0; i < BATCH_SIZE; i++) {
+      entries[i] = createEntry(TABLE_NAME1, i, KeyValue.Type.Put);
+    }
+    SINK.replicateEntries(entries);
+    Scan scan = new Scan();
+    ResultScanner scanRes = table1.getScanner(scan);
+    assertEquals(BATCH_SIZE, scanRes.next(BATCH_SIZE).length);
+  }
+
+  /**
+   * Insert a mix of puts and deletes
+   * @throws Exception
+   */
+  @Test
+  public void testMixedPutDelete() throws Exception {
+    HLog.Entry[] entries = new HLog.Entry[BATCH_SIZE/2];
+    for(int i = 0; i < BATCH_SIZE/2; i++) {
+      entries[i] = createEntry(TABLE_NAME1, i, KeyValue.Type.Put);
+    }
+    SINK.replicateEntries(entries);
+
+    entries = new HLog.Entry[BATCH_SIZE];
+    for(int i = 0; i < BATCH_SIZE; i++) {
+      entries[i] = createEntry(TABLE_NAME1, i,
+          i % 2 != 0 ? KeyValue.Type.Put: KeyValue.Type.DeleteColumn);
+    }
+
+    SINK.replicateEntries(entries);
+    Scan scan = new Scan();
+    ResultScanner scanRes = table1.getScanner(scan);
+    assertEquals(BATCH_SIZE/2, scanRes.next(BATCH_SIZE).length);
+  }
+
+  /**
+   * Insert to 2 different tables
+   * @throws Exception
+   */
+  @Test
+  public void testMixedPutTables() throws Exception {
+    HLog.Entry[] entries = new HLog.Entry[BATCH_SIZE];
+    for(int i = 0; i < BATCH_SIZE; i++) {
+      entries[i] =
+          createEntry( i % 2 == 0 ? TABLE_NAME2 : TABLE_NAME1,
+              i, KeyValue.Type.Put);
+    }
+
+    SINK.replicateEntries(entries);
+    Scan scan = new Scan();
+    ResultScanner scanRes = table2.getScanner(scan);
+    for(Result res : scanRes) {
+      assertTrue(Bytes.toInt(res.getRow()) % 2 == 0);
+    }
+  }
+
+  /**
+   * Insert then do different types of deletes
+   * @throws Exception
+   */
+  @Test
+  public void testMixedDeletes() throws Exception {
+    HLog.Entry[] entries = new HLog.Entry[3];
+    for(int i = 0; i < 3; i++) {
+      entries[i] = createEntry(TABLE_NAME1, i, KeyValue.Type.Put);
+    }
+    SINK.replicateEntries(entries);
+    entries = new HLog.Entry[3];
+
+    entries[0] = createEntry(TABLE_NAME1, 0, KeyValue.Type.DeleteColumn);
+    entries[1] = createEntry(TABLE_NAME1, 1, KeyValue.Type.DeleteFamily);
+    entries[2] = createEntry(TABLE_NAME1, 2, KeyValue.Type.DeleteColumn);
+
+    SINK.replicateEntries(entries);
+
+    Scan scan = new Scan();
+    ResultScanner scanRes = table1.getScanner(scan);
+    assertEquals(0, scanRes.next(3).length);
+  }
+
+  /**
+   * Puts are buffered, but this tests when a delete (not-buffered) is applied
+   * before the actual Put that creates it.
+   * @throws Exception
+   */
+  @Test
+  public void testApplyDeleteBeforePut() throws Exception {
+    HLog.Entry[] entries = new HLog.Entry[5];
+    for(int i = 0; i < 2; i++) {
+      entries[i] = createEntry(TABLE_NAME1, i, KeyValue.Type.Put);
+    }
+    entries[2] = createEntry(TABLE_NAME1, 1, KeyValue.Type.DeleteFamily);
+    for(int i = 3; i < 5; i++) {
+      entries[i] = createEntry(TABLE_NAME1, i, KeyValue.Type.Put);
+    }
+    SINK.replicateEntries(entries);
+    Get get = new Get(Bytes.toBytes(1));
+    Result res = table1.get(get);
+    assertEquals(0, res.size());
+  }
+
+  private HLog.Entry createEntry(byte [] table, int row,  KeyValue.Type type) {
+    byte[] fam = Bytes.equals(table, TABLE_NAME1) ? FAM_NAME1 : FAM_NAME2;
+    byte[] rowBytes = Bytes.toBytes(row);
+    // Just make sure we don't get the same ts for two consecutive rows with the
+    // same key
+    try {
+      Thread.sleep(1);
+    } catch (InterruptedException e) {
+      LOG.info("Was interrupted while sleep, meh", e);
+    }
+    final long now = System.currentTimeMillis();
+    KeyValue kv = null;
+    if (type.getCode() == KeyValue.Type.Put.getCode()) {
+      kv = new KeyValue(rowBytes, fam, fam, now,
+          KeyValue.Type.Put, Bytes.toBytes(row));
+    } else if (type.getCode() == KeyValue.Type.DeleteColumn.getCode()) {
+      kv = new KeyValue(rowBytes, fam, fam,
+          now, KeyValue.Type.DeleteColumn);
+    } else if (type.getCode() == KeyValue.Type.DeleteFamily.getCode()) {
+      kv = new KeyValue(rowBytes, fam, null,
+          now, KeyValue.Type.DeleteFamily);
+    }
+
+    HLogKey key = new HLogKey(table, table, now, now);
+
+    WALEdit edit = new WALEdit();
+    edit.add(kv);
+
+    return new HLog.Entry(key, edit);
+  }
+}

Added: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java?rev=1176177&view=auto
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java (added)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java Tue Sep 27 02:41:56 2011
@@ -0,0 +1,209 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication.regionserver;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.regionserver.wal.HLog;
+import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
+import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
+import org.apache.hadoop.hbase.replication.ReplicationSourceDummy;
+import org.apache.hadoop.hbase.replication.ReplicationZookeeperWrapper;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.zookeeper.ZooKeeperWrapper;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.net.URLEncoder;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import static org.junit.Assert.assertEquals;
+
+public class TestReplicationSourceManager {
+
+  private static final Log LOG =
+      LogFactory.getLog(TestReplicationSourceManager.class);
+
+  private static Configuration conf;
+
+  private static HBaseTestingUtility utility;
+
+  private static final AtomicBoolean STOPPER = new AtomicBoolean(false);
+
+  private static final AtomicBoolean REPLICATING = new AtomicBoolean(false);
+
+  private static ReplicationSourceManager manager;
+
+  private static ZooKeeperWrapper zkw;
+
+  private static HTableDescriptor htd;
+
+  private static HRegionInfo hri;
+
+  private static final byte[] r1 = Bytes.toBytes("r1");
+
+  private static final byte[] r2 = Bytes.toBytes("r2");
+
+  private static final byte[] f1 = Bytes.toBytes("f1");
+
+  private static final byte[] f2 = Bytes.toBytes("f2");
+
+  private static final byte[] test = Bytes.toBytes("test");
+
+  private static FileSystem fs;
+
+  private static Path oldLogDir;
+
+  private static Path logDir;
+
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+
+    conf = HBaseConfiguration.create();
+    conf.set("replication.replicationsource.implementation",
+        ReplicationSourceDummy.class.getCanonicalName());
+    conf.setBoolean(HConstants.REPLICATION_ENABLE_KEY, true);
+    utility = new HBaseTestingUtility(conf);
+    utility.startMiniZKCluster();
+
+    zkw = ZooKeeperWrapper.createInstance(conf, "test");
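+    // Point both the master cluster znode and peer "1" at this mini cluster's ZK quorum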
+    zkw.writeZNode("/hbase", "replication", "");
+    zkw.writeZNode("/hbase/replication", "master",
+        conf.get(HConstants.ZOOKEEPER_QUORUM)+":" +
+    conf.get("hbase.zookeeper.property.clientPort")+":/1");
+    zkw.writeZNode("/hbase/replication/peers", "1",
+          conf.get(HConstants.ZOOKEEPER_QUORUM)+":" +
+          conf.get("hbase.zookeeper.property.clientPort")+":/1");
+
+    HRegionServer server = new HRegionServer(conf);
+    ReplicationZookeeperWrapper helper = new ReplicationZookeeperWrapper(
+        server.getZooKeeperWrapper(), conf,
+        REPLICATING, "123456789");
+    fs = FileSystem.get(conf);
+    oldLogDir = new Path(utility.getTestDir(),
+        HConstants.HREGION_OLDLOGDIR_NAME);
+    logDir = new Path(utility.getTestDir(),
+        HConstants.HREGION_LOGDIR_NAME);
+
+    manager = new ReplicationSourceManager(helper,
+        conf, STOPPER, fs, REPLICATING, oldLogDir);
+    manager.addSource("1");
+
+    htd = new HTableDescriptor(test);
+    HColumnDescriptor col = new HColumnDescriptor("f1");
+    col.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
+    htd.addFamily(col);
+    col = new HColumnDescriptor("f2");
+    col.setScope(HConstants.REPLICATION_SCOPE_LOCAL);
+    htd.addFamily(col);
+
+    hri = new HRegionInfo(htd, r1, r2);
+  }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    manager.join();
+    utility.shutdownMiniCluster();
+  }
+
+  @Before
+  public void setUp() throws Exception {
+    fs.delete(logDir, true);
+    fs.delete(oldLogDir, true);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    setUp();
+  }
+
+  @Test
+  public void testLogRoll() throws Exception {
+    long seq = 0;
+    long baseline = 1000;
+    long time = baseline;
+    KeyValue kv = new KeyValue(r1, f1, r1);
+    WALEdit edit = new WALEdit();
+    edit.add(kv);
+
+    HLog hlog = new HLog(fs, logDir, oldLogDir, conf, null, manager,
+      URLEncoder.encode("regionserver:60020", "UTF8"));
+
+    manager.init();
+
+    // Testing normal log rolling every 20
+    for(long i = 1; i < 101; i++) {
+      if(i > 1 && i % 20 == 0) {
+        hlog.rollWriter();
+      }
+      LOG.info(i);
+      HLogKey key = new HLogKey(hri.getRegionName(),
+        test, seq++, System.currentTimeMillis());
+      hlog.append(hri, key, edit);
+    }
+
+    // Simulate a rapid insert followed by a report that is still not totally
+    // complete (missing the last one)
+    LOG.info(baseline + " and " + time);
+    baseline += 101;
+    time = baseline;
+    LOG.info(baseline + " and " + time);
+
+    for (int i = 0; i < 3; i++) {
+      HLogKey key = new HLogKey(hri.getRegionName(),
+        test, seq++, System.currentTimeMillis());
+      hlog.append(hri, key, edit);
+    }
+
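+    // Five rolls (at i = 20, 40, 60, 80, 100) plus the initial log leave six HLogs queued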
+    assertEquals(6, manager.getHLogs().size());
+
+    hlog.rollWriter();
+
+    manager.logPositionAndCleanOldLogs(manager.getSources().get(0).getCurrentPath(),
+        "1", 0, false);
+
+    HLogKey key = new HLogKey(hri.getRegionName(),
+          test, seq++, System.currentTimeMillis());
+    hlog.append(hri, key, edit);
+
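+    // After reporting the position and appending to the new log, only the current HLog should remain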
+    assertEquals(1, manager.getHLogs().size());
+
+    // TODO Add a case with only two HLogs where only the first one should be deleted
+  }
+
+}

Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/rest/HBaseRESTClusterTestBase.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/rest/HBaseRESTClusterTestBase.java?rev=1176177&r1=1176176&r2=1176177&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/rest/HBaseRESTClusterTestBase.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/rest/HBaseRESTClusterTestBase.java Tue Sep 27 02:41:56 2011
@@ -1,3 +1,22 @@
+/**
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.hadoop.hbase.rest;
 
 import org.apache.commons.logging.Log;

Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/util/TestByteBloomFilter.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/util/TestByteBloomFilter.java?rev=1176177&r1=1176176&r2=1176177&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/util/TestByteBloomFilter.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/util/TestByteBloomFilter.java Tue Sep 27 02:41:56 2011
@@ -88,7 +88,7 @@ public class TestByteBloomFilter extends
     for (int i = 0; i < 12; ++i) {
       b.add(Bytes.toBytes(i));
     }
-    b.finalize();
+    b.compactBloom();
     assertEquals(origSize>>2, b.getByteSize());
     int falsePositives = 0;
     for (int i = 0; i < 25; ++i) {
@@ -118,7 +118,7 @@ public class TestByteBloomFilter extends
 
     // fold
     startTime = System.currentTimeMillis();
-    b.finalize();
+    b.compactBloom();
     endTime = System.currentTimeMillis();
     System.out.println("Total Fold time = " + (endTime - startTime) + "ms");
     assertTrue(origSize >= b.getByteSize()<<3);


