hbase-commits mailing list archives

From st...@apache.org
Subject hbase git commit: HBASE-12490 Replace uses of setAutoFlush(boolean, boolean) (Solomon Duskis)
Date Tue, 02 Dec 2014 18:06:32 GMT
Repository: hbase
Updated Branches:
  refs/heads/master 1a9b55647 -> 7a3396f0e


HBASE-12490 Replace uses of setAutoFlush(boolean, boolean) (Solomon Duskis)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7a3396f0
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7a3396f0
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7a3396f0

Branch: refs/heads/master
Commit: 7a3396f0e1ebf390038d71e190c2d33ee813ade3
Parents: 1a9b556
Author: stack <stack@apache.org>
Authored: Tue Dec 2 10:06:21 2014 -0800
Committer: stack <stack@apache.org>
Committed: Tue Dec 2 10:06:21 2014 -0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hbase/client/HTable.java     | 17 ++++++-----------
 .../hadoop/hbase/client/TestAsyncProcess.java      | 10 +++++-----
 .../hbase/test/IntegrationTestBigLinkedList.java   |  2 +-
 ...IntegrationTestBigLinkedListWithVisibility.java |  2 +-
 .../hbase/test/IntegrationTestLoadAndVerify.java   |  2 +-
 .../trace/IntegrationTestSendTraceRequests.java    |  2 +-
 .../hadoop/hbase/rest/PerformanceEvaluation.java   |  2 +-
 .../hbase/mapreduce/MultiTableOutputFormat.java    |  2 +-
 .../hadoop/hbase/mapreduce/TableOutputFormat.java  |  2 +-
 .../hadoop/hbase/client/TestFromClientSide.java    |  6 +++---
 .../hadoop/hbase/client/TestMultiParallel.java     |  2 +-
 .../hbase/coprocessor/TestHTableWrapper.java       |  2 +-
 .../hbase/master/TestDistributedLogSplitting.java  |  4 ++--
 .../regionserver/TestRegionServerMetrics.java      |  2 +-
 .../hbase/regionserver/wal/TestLogRolling.java     |  2 +-
 .../TestReplicationChangingPeerRegionservers.java  |  2 +-
 .../replication/TestReplicationSmallTests.java     |  6 +++---
 .../hbase/snapshot/SnapshotTestingUtils.java       |  2 +-
 18 files changed, 32 insertions(+), 37 deletions(-)
----------------------------------------------------------------------
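
Summary of the change applied across these files: calls to the deprecated two-argument HTable.setAutoFlush(boolean autoFlush, boolean clearBufferOnFail) become the single-argument setAutoFlushTo(autoFlush) wherever clearBufferOnFail was true, while the TestAsyncProcess calls that passed clearBufferOnFail=false keep the single-argument setAutoFlush(false). Below is a minimal sketch (not part of the commit) of buffered-put client code using the replacement call; it assumes a running cluster and an existing table named "test_table" with column family "f", and the row, qualifier, and value literals are placeholders.

----------------------------------------------------------------------
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class AutoFlushMigrationSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    HTable table = new HTable(conf, TableName.valueOf("test_table"));
    try {
      // Old, deprecated form replaced by this commit:
      //   table.setAutoFlush(false, true);
      // New form; buffering behaviour is unchanged (clearBufferOnFail stays true by default):
      table.setAutoFlushTo(false);
      table.setWriteBufferSize(4 * 1024 * 1024);

      Put put = new Put(Bytes.toBytes("row1"));
      put.add(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
      table.put(put);        // buffered client-side while autoFlush is off
      table.flushCommits();  // pushes the buffered mutations to the region servers
    } finally {
      table.close();
    }
  }
}
----------------------------------------------------------------------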


http://git-wip-us.apache.org/repos/asf/hbase/blob/7a3396f0/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
index 61cb8b8..a88a265 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
@@ -116,18 +116,17 @@ public class HTable implements HTableInterface, RegionLocator {
   private TableConfiguration tableConfiguration;
   protected List<Row> writeAsyncBuffer = new LinkedList<Row>();
   private long writeBufferSize;
-  private boolean clearBufferOnFail;
-  private boolean autoFlush;
-  protected long currentWriteBufferSize;
+  private boolean clearBufferOnFail = true;
+  private boolean autoFlush = true;
+  protected long currentWriteBufferSize = 0 ;
+  private boolean closed = false;
   protected int scannerCaching;
   private ExecutorService pool;  // For Multi & Scan
-  private boolean closed;
   private int operationTimeout;
   private final boolean cleanupPoolOnClose; // shutdown the pool in close()
   private final boolean cleanupConnectionOnClose; // close the connection in close()
   private Consistency defaultConsistency = Consistency.STRONG;
 
-
   /** The Async process for puts with autoflush set to false or multiputs */
   protected AsyncProcess ap;
   /** The Async process for batch */
@@ -326,9 +325,10 @@ public class HTable implements HTableInterface, RegionLocator {
 
   /**
    * For internal testing.
+   * @throws IOException
    */
   @VisibleForTesting
-  protected HTable() {
+  protected HTable() throws IOException {
     tableName = null;
     tableConfiguration = new TableConfiguration();
     cleanupPoolOnClose = false;
@@ -353,9 +353,6 @@ public class HTable implements HTableInterface, RegionLocator {
     this.operationTimeout = tableName.isSystemTable() ?
         tableConfiguration.getMetaOperationTimeout() : tableConfiguration.getOperationTimeout();
     this.writeBufferSize = tableConfiguration.getWriteBufferSize();
-    this.clearBufferOnFail = true;
-    this.autoFlush = true;
-    this.currentWriteBufferSize = 0;
     this.scannerCaching = tableConfiguration.getScannerCaching();
 
     if (this.rpcCallerFactory == null) {
@@ -368,8 +365,6 @@ public class HTable implements HTableInterface, RegionLocator {
     // puts need to track errors globally due to how the APIs currently work.
     ap = new AsyncProcess(connection, configuration, pool, rpcCallerFactory, true, rpcControllerFactory);
     multiAp = this.connection.getAsyncProcess();
-
-    this.closed = false;
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/7a3396f0/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
index 8d77d7a..8a3aafc 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
@@ -663,7 +663,7 @@ public class TestAsyncProcess {
     HTable ht = new HTable();
     MyAsyncProcess ap = new MyAsyncProcess(createHConnection(), conf, true);
     ht.ap = ap;
-    ht.setAutoFlush(true, true);
+    ht.setAutoFlushTo(true);
     if (bufferOn) {
       ht.setWriteBufferSize(1024L * 1024L);
     } else {
@@ -711,7 +711,7 @@ public class TestAsyncProcess {
     HTable ht = new HTable();
     MyAsyncProcess ap = new MyAsyncProcess(createHConnection(), conf, true);
     ht.ap = ap;
-    ht.setAutoFlush(false, true);
+    ht.setAutoFlushTo(false);
     ht.setWriteBufferSize(0);
 
     Put p = createPut(1, false);
@@ -739,7 +739,7 @@ public class TestAsyncProcess {
   public void testWithNoClearOnFail() throws IOException {
     HTable ht = new HTable();
     ht.ap = new MyAsyncProcess(createHConnection(), conf, true);
-    ht.setAutoFlush(false, false);
+    ht.setAutoFlush(false);
 
     Put p = createPut(1, false);
     ht.put(p);
@@ -806,7 +806,7 @@ public class TestAsyncProcess {
     ht.ap.serverTrackerTimeout = 1;
 
     Put p = createPut(1, false);
-    ht.setAutoFlush(false, false);
+    ht.setAutoFlush(false);
     ht.put(p);
 
     try {
@@ -828,7 +828,7 @@ public class TestAsyncProcess {
     Assert.assertNotNull(ht.ap.createServerErrorTracker());
 
     Put p = createPut(1, true);
-    ht.setAutoFlush(false, false);
+    ht.setAutoFlush(false);
     ht.put(p);
 
     try {

http://git-wip-us.apache.org/repos/asf/hbase/blob/7a3396f0/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
----------------------------------------------------------------------
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
index 177341f..37e4f8b 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
@@ -363,7 +363,7 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
 
       protected void instantiateHTable(Configuration conf) throws IOException {
         table = new HTable(conf, getTableName(conf));
-        table.setAutoFlush(false, true);
+        table.setAutoFlushTo(false);
         table.setWriteBufferSize(4 * 1024 * 1024);
       }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/7a3396f0/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedListWithVisibility.java
----------------------------------------------------------------------
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedListWithVisibility.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedListWithVisibility.java
index 603c3df..dc517a5 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedListWithVisibility.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedListWithVisibility.java
@@ -185,7 +185,7 @@ public class IntegrationTestBigLinkedListWithVisibility extends IntegrationTestB
       protected void instantiateHTable(Configuration conf) throws IOException {
         for (int i = 0; i < DEFAULT_TABLES_COUNT; i++) {
           HTable table = new HTable(conf, getTableName(i));
-          table.setAutoFlush(true, true);
+          table.setAutoFlushTo(true);
           //table.setWriteBufferSize(4 * 1024 * 1024);
           this.tables[i] = table;
         }

http://git-wip-us.apache.org/repos/asf/hbase/blob/7a3396f0/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestLoadAndVerify.java
----------------------------------------------------------------------
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestLoadAndVerify.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestLoadAndVerify.java
index 5c9a9ad..60f20a5 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestLoadAndVerify.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestLoadAndVerify.java
@@ -181,7 +181,7 @@ public void cleanUpCluster() throws Exception {
       numBackReferencesPerRow = conf.getInt(NUM_BACKREFS_KEY, NUM_BACKREFS_DEFAULT);
       table = new HTable(conf, TableName.valueOf(tableName));
       table.setWriteBufferSize(4*1024*1024);
-      table.setAutoFlush(false, true);
+      table.setAutoFlushTo(false);
 
       String taskId = conf.get("mapreduce.task.attempt.id");
       Matcher matcher = Pattern.compile(".+_m_(\\d+_\\d+)").matcher(taskId);

http://git-wip-us.apache.org/repos/asf/hbase/blob/7a3396f0/hbase-it/src/test/java/org/apache/hadoop/hbase/trace/IntegrationTestSendTraceRequests.java
----------------------------------------------------------------------
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/trace/IntegrationTestSendTraceRequests.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/trace/IntegrationTestSendTraceRequests.java
index c96a6ac..b1cf57e 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/trace/IntegrationTestSendTraceRequests.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/trace/IntegrationTestSendTraceRequests.java
@@ -239,7 +239,7 @@ public class IntegrationTestSendTraceRequests extends AbstractHBaseTool {
     for (int x = 0; x < 5000; x++) {
       TraceScope traceScope = Trace.startSpan("insertData", Sampler.ALWAYS);
       try {
-        ht.setAutoFlush(false, true);
+        ht.setAutoFlushTo(false);
         for (int i = 0; i < 5; i++) {
           long rk = random.nextLong();
           rowKeys.add(rk);

http://git-wip-us.apache.org/repos/asf/hbase/blob/7a3396f0/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java
index 7e17c01..b02f069 100644
--- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java
@@ -909,7 +909,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
 
     void testSetup() throws IOException {
       this.table = connection.getTable(tableName);
-      this.table.setAutoFlush(false, true);
+      this.table.setAutoFlushTo(false);
     }
 
     void testTakedown()  throws IOException {

http://git-wip-us.apache.org/repos/asf/hbase/blob/7a3396f0/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableOutputFormat.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableOutputFormat.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableOutputFormat.java
index 62a9626..20cf50a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableOutputFormat.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableOutputFormat.java
@@ -104,7 +104,7 @@ public class MultiTableOutputFormat extends OutputFormat<ImmutableBytesWritable,
       if (!tables.containsKey(tableName)) {
         LOG.debug("Opening HTable \"" + Bytes.toString(tableName.get())+ "\" for writing");
         HTable table = new HTable(conf, TableName.valueOf(tableName.get()));
-        table.setAutoFlush(false, true);
+        table.setAutoFlushTo(false);
         tables.put(tableName, table);
       }
       return tables.get(tableName);

http://git-wip-us.apache.org/repos/asf/hbase/blob/7a3396f0/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
index 415fd3d..107e7b6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
@@ -194,7 +194,7 @@ implements Configurable {
       }
       this.connection = ConnectionFactory.createConnection(this.conf);
       this.table = connection.getTable(TableName.valueOf(tableName));
-      ((HTable) this.table).setAutoFlush(false, true);
+      this.table.setAutoFlushTo(false);
       LOG.info("Created table instance for "  + tableName);
     } catch(IOException e) {
       LOG.error(e);

http://git-wip-us.apache.org/repos/asf/hbase/blob/7a3396f0/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
index 31ed8176..9554d97 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
@@ -3907,7 +3907,7 @@ public class TestFromClientSide {
     final int NB_BATCH_ROWS = 10;
     HTable table = TEST_UTIL.createTable(Bytes.toBytes("testRowsPutBufferedOneFlush"),
       new byte [][] {CONTENTS_FAMILY, SMALL_FAMILY});
-    table.setAutoFlush(false, true);
+    table.setAutoFlushTo(false);
     ArrayList<Put> rowsUpdate = new ArrayList<Put>();
     for (int i = 0; i < NB_BATCH_ROWS * 10; i++) {
       byte[] row = Bytes.toBytes("row" + i);
@@ -3948,7 +3948,7 @@ public class TestFromClientSide {
     final int NB_BATCH_ROWS = 10;
     HTable table = TEST_UTIL.createTable(Bytes.toBytes("testRowsPutBufferedManyManyFlushes"),
       new byte[][] {CONTENTS_FAMILY, SMALL_FAMILY });
-    table.setAutoFlush(false, true);
+    table.setAutoFlushTo(false);
     table.setWriteBufferSize(10);
     ArrayList<Put> rowsUpdate = new ArrayList<Put>();
     for (int i = 0; i < NB_BATCH_ROWS * 10; i++) {
@@ -4277,7 +4277,7 @@ public class TestFromClientSide {
           new byte[][] { HConstants.CATALOG_FAMILY, Bytes.toBytes("info2") }, 1, 1024);
     // set block size to 64 to making 2 kvs into one block, bypassing the walkForwardInSingleRow
     // in Store.rowAtOrBeforeFromStoreFile
-    table.setAutoFlush(true);
+    table.setAutoFlushTo(true);
     String regionName = table.getRegionLocations().firstKey().getEncodedName();
     HRegion region =
         TEST_UTIL.getRSForFirstRegionInTable(tableAname).getFromOnlineRegions(regionName);

http://git-wip-us.apache.org/repos/asf/hbase/blob/7a3396f0/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java
index 69267ec..47bb569 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java
@@ -263,7 +263,7 @@ public class TestMultiParallel {
     // Load the data
     LOG.info("get new table");
     HTable table = new HTable(UTIL.getConfiguration(), TEST_TABLE);
-    table.setAutoFlush(false, true);
+    table.setAutoFlushTo(false);
     table.setWriteBufferSize(10 * 1024 * 1024);
 
     LOG.info("constructPutRequests");

http://git-wip-us.apache.org/repos/asf/hbase/blob/7a3396f0/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestHTableWrapper.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestHTableWrapper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestHTableWrapper.java
index de0057c..4649961 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestHTableWrapper.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestHTableWrapper.java
@@ -178,7 +178,7 @@ public class TestHTableWrapper {
     boolean initialAutoFlush = hTableInterface.isAutoFlush();
     hTableInterface.setAutoFlushTo(false);
     assertFalse(hTableInterface.isAutoFlush());
-    hTableInterface.setAutoFlush(true, true);
+    hTableInterface.setAutoFlushTo(true);
     assertTrue(hTableInterface.isAutoFlush());
     hTableInterface.setAutoFlushTo(initialAutoFlush);
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/7a3396f0/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
index 5e867ad..f37c1eb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
@@ -921,7 +921,7 @@ public class TestDistributedLogSplitting {
       if (key == null || key.length == 0) {
         key = new byte[] { 0, 0, 0, 0, 1 };
       }
-      ht.setAutoFlush(true, true);
+      ht.setAutoFlushTo(true);
       Put put = new Put(key);
       put.add(Bytes.toBytes("family"), Bytes.toBytes("c1"), new byte[]{'b'});
       ht.put(put);
@@ -1612,7 +1612,7 @@ public class TestDistributedLogSplitting {
    * Load table with puts and deletes with expected values so that we can verify later
    */
   private void prepareData(final HTable t, final byte[] f, final byte[] column) throws IOException {
-    t.setAutoFlush(false, true);
+    t.setAutoFlushTo(false);
     byte[] k = new byte[3];
 
     // add puts

http://git-wip-us.apache.org/repos/asf/hbase/blob/7a3396f0/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
index 3ae82ee..d3285a3 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
@@ -348,7 +348,7 @@ public class TestRegionServerMetrics {
 
     TEST_UTIL.createTable(tableName, cf);
     HTable t = new HTable(conf, tableName);
-    t.setAutoFlush(false, true);
+    t.setAutoFlushTo(false);
     for (int insertCount =0; insertCount < 100; insertCount++) {
       Put p = new Put(Bytes.toBytes("" + insertCount + "row"));
       p.add(cf, qualifier, val);

http://git-wip-us.apache.org/repos/asf/hbase/blob/7a3396f0/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java
index 9c6584e..86e77ad 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java
@@ -456,7 +456,7 @@ public class TestLogRolling  {
 
       writeData(table, 1002);
 
-      table.setAutoFlush(true, true);
+      table.setAutoFlushTo(true);
 
       long curTime = System.currentTimeMillis();
       LOG.info("log.getCurrentFileName()): " + DefaultWALProvider.getCurrentFileName(log));

http://git-wip-us.apache.org/repos/asf/hbase/blob/7a3396f0/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationChangingPeerRegionservers.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationChangingPeerRegionservers.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationChangingPeerRegionservers.java
index 0319607..67f2031 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationChangingPeerRegionservers.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationChangingPeerRegionservers.java
@@ -54,7 +54,7 @@ public class TestReplicationChangingPeerRegionservers extends TestReplicationBase {
    */
   @Before
   public void setUp() throws Exception {
-    ((HTable)htable1).setAutoFlush(false, true);
+    htable1.setAutoFlushTo(false);
     // Starting and stopping replication can make us miss new logs,
     // rolling like this makes sure the most recent one gets added to the queue
     for (JVMClusterUtil.RegionServerThread r :

http://git-wip-us.apache.org/repos/asf/hbase/blob/7a3396f0/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java
index c12089a..4377082 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java
@@ -69,7 +69,7 @@ public class TestReplicationSmallTests extends TestReplicationBase {
    */
   @Before
   public void setUp() throws Exception {
-    ((HTable)htable1).setAutoFlush(true, true);
+    htable1.setAutoFlushTo(true);
     // Starting and stopping replication can make us miss new logs,
     // rolling like this makes sure the most recent one gets added to the queue
     for ( JVMClusterUtil.RegionServerThread r :
@@ -247,7 +247,7 @@ public class TestReplicationSmallTests extends TestReplicationBase {
     LOG.info("testSmallBatch");
     Put put;
     // normal Batch tests
-    ((HTable)htable1).setAutoFlush(false, true);
+    htable1.setAutoFlushTo(false);
     for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
       put = new Put(Bytes.toBytes(i));
       put.add(famName, row, row);
@@ -387,7 +387,7 @@ public class TestReplicationSmallTests extends TestReplicationBase {
   public void testLoading() throws Exception {
     LOG.info("Writing out rows to table1 in testLoading");
     htable1.setWriteBufferSize(1024);
-    ((HTable)htable1).setAutoFlush(false, true);
+    ((HTable)htable1).setAutoFlushTo(false);
     for (int i = 0; i < NB_ROWS_IN_BIG_BATCH; i++) {
       Put put = new Put(Bytes.toBytes(i));
       put.add(famName, row, row);

http://git-wip-us.apache.org/repos/asf/hbase/blob/7a3396f0/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
index a3d1aac..cebb3c4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
@@ -678,7 +678,7 @@ public class SnapshotTestingUtils {
 
   public static void loadData(final HBaseTestingUtility util, final HTable table, int rows,
       byte[]... families) throws IOException, InterruptedException {
-    table.setAutoFlush(false, true);
+    table.setAutoFlushTo(false);
 
     // Ensure one row per region
     assertTrue(rows >= KEYS.length);

