hbase-commits mailing list archives

From st...@apache.org
Subject [1/2] hbase git commit: HBASE-12519 Remove tabs used as whitespace (Varun Saxena)
Date Sat, 29 Nov 2014 17:24:20 GMT
Repository: hbase
Updated Branches:
  refs/heads/branch-1 7eefa36ac -> b12d57783


http://git-wip-us.apache.org/repos/asf/hbase/blob/b12d5778/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/producer/PlainTextMessageBodyProducer.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/producer/PlainTextMessageBodyProducer.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/producer/PlainTextMessageBodyProducer.java
index 523692a..d1817db 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/producer/PlainTextMessageBodyProducer.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/producer/PlainTextMessageBodyProducer.java
@@ -54,21 +54,21 @@ public class PlainTextMessageBodyProducer
     return true;
   }
 
-	@Override
-	public long getSize(Object object, Class<?> type, Type genericType,
-			Annotation[] annotations, MediaType mediaType) {
+  @Override
+  public long getSize(Object object, Class<?> type, Type genericType,
+      Annotation[] annotations, MediaType mediaType) {
     byte[] bytes = object.toString().getBytes(); 
-	  buffer.set(bytes);
+    buffer.set(bytes);
     return bytes.length;
-	}
+  }
 
-	@Override
-	public void writeTo(Object object, Class<?> type, Type genericType,
-			Annotation[] annotations, MediaType mediaType,
-			MultivaluedMap<String, Object> httpHeaders, OutputStream outStream)
-			throws IOException, WebApplicationException {
+  @Override
+  public void writeTo(Object object, Class<?> type, Type genericType,
+      Annotation[] annotations, MediaType mediaType,
+      MultivaluedMap<String, Object> httpHeaders, OutputStream outStream)
+      throws IOException, WebApplicationException {
     byte[] bytes = buffer.get();
-		outStream.write(bytes);
+    outStream.write(bytes);
     buffer.remove();
-	}	
+  }	
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b12d5778/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/producer/ProtobufMessageBodyProducer.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/producer/ProtobufMessageBodyProducer.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/producer/ProtobufMessageBodyProducer.java
index 6d737b5..0c2430f 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/producer/ProtobufMessageBodyProducer.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/producer/ProtobufMessageBodyProducer.java
@@ -50,32 +50,32 @@ public class ProtobufMessageBodyProducer
 
   private ThreadLocal<byte[]> buffer = new ThreadLocal<byte[]>();
 
-	@Override
-	public boolean isWriteable(Class<?> type, Type genericType, 
-	  Annotation[] annotations, MediaType mediaType) {
+  @Override
+  public boolean isWriteable(Class<?> type, Type genericType, 
+    Annotation[] annotations, MediaType mediaType) {
       return ProtobufMessageHandler.class.isAssignableFrom(type);
   }
 
-	@Override
-	public long getSize(ProtobufMessageHandler m, Class<?> type, Type genericType,
-	    Annotation[] annotations, MediaType mediaType) {
-	  ByteArrayOutputStream baos = new ByteArrayOutputStream();
-	  try {
-	    baos.write(m.createProtobufOutput());
-	  } catch (IOException e) {
-	    return -1;
-	  }
-	  byte[] bytes = baos.toByteArray();
-	  buffer.set(bytes);
-	  return bytes.length;
-	}
+  @Override
+  public long getSize(ProtobufMessageHandler m, Class<?> type, Type genericType,
+      Annotation[] annotations, MediaType mediaType) {
+    ByteArrayOutputStream baos = new ByteArrayOutputStream();
+    try {
+      baos.write(m.createProtobufOutput());
+    } catch (IOException e) {
+      return -1;
+    }
+    byte[] bytes = baos.toByteArray();
+    buffer.set(bytes);
+    return bytes.length;
+  }
 
-	public void writeTo(ProtobufMessageHandler m, Class<?> type, Type genericType,
-	    Annotation[] annotations, MediaType mediaType, 
-	    MultivaluedMap<String, Object> httpHeaders, OutputStream entityStream) 
-	    throws IOException, WebApplicationException {
+  public void writeTo(ProtobufMessageHandler m, Class<?> type, Type genericType,
+      Annotation[] annotations, MediaType mediaType, 
+      MultivaluedMap<String, Object> httpHeaders, OutputStream entityStream) 
+      throws IOException, WebApplicationException {
     byte[] bytes = buffer.get();
-	  entityStream.write(bytes);
+    entityStream.write(bytes);
     buffer.remove();
-	}
+  }
 }
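
Both producers reindented above follow the same JAX-RS MessageBodyWriter idiom: getSize() serializes the entity once and parks the bytes in a ThreadLocal, then writeTo() drains and clears that buffer on the same request thread. A minimal standalone sketch of that handoff (not HBase code; class and method names are illustrative):

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStream;

// Illustrative sketch of the ThreadLocal handoff pattern, not HBase code.
public class ThreadLocalBodyWriterSketch {
  // One buffer per request thread; getSize() fills it, writeTo() drains it.
  private final ThreadLocal<byte[]> buffer = new ThreadLocal<byte[]>();

  public long getSize(Object entity) {
    byte[] bytes = entity.toString().getBytes();
    buffer.set(bytes);   // cache so the body is serialized only once
    return bytes.length; // lets the container set Content-Length
  }

  public void writeTo(OutputStream out) throws IOException {
    byte[] bytes = buffer.get();
    out.write(bytes);
    buffer.remove();     // clear, or the byte[] lingers on pooled threads
  }

  public static void main(String[] args) throws IOException {
    ThreadLocalBodyWriterSketch w = new ThreadLocalBodyWriterSketch();
    long len = w.getSize("hello");
    ByteArrayOutputStream sink = new ByteArrayOutputStream();
    w.writeTo(sink);
    System.out.println(len + " bytes written: " + sink);
  }
}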

http://git-wip-us.apache.org/repos/asf/hbase/blob/b12d5778/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRPCErrorHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRPCErrorHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRPCErrorHandler.java
index 6a21a69..7c4ed01 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRPCErrorHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRPCErrorHandler.java
@@ -26,10 +26,10 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
  */
 @InterfaceAudience.Private
 public interface HBaseRPCErrorHandler {
-	/**
-	 * Take actions on the event of an OutOfMemoryError.
-	 * @param e the throwable
-	 * @return if the server should be shut down
-	 */
+  /**
+   * Take actions on the event of an OutOfMemoryError.
+   * @param e the throwable
+   * @return if the server should be shut down
+   */
   boolean checkOOME(final Throwable e) ;
 }
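
The interface above defines a single callback. A hypothetical implementation, compiling against the interface shown in the diff (this is not the region server's actual handler), would walk the cause chain and vote to shut down on any OutOfMemoryError:

// Hypothetical HBaseRPCErrorHandler implementation; illustrative only.
public class OomeAbortHandler implements HBaseRPCErrorHandler {
  @Override
  public boolean checkOOME(final Throwable e) {
    // Any OutOfMemoryError in the cause chain leaves the JVM in a suspect
    // state, so report that the server should be shut down.
    for (Throwable t = e; t != null; t = t.getCause()) {
      if (t instanceof OutOfMemoryError) {
        return true;
      }
    }
    return false;
  }
}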

http://git-wip-us.apache.org/repos/asf/hbase/blob/b12d5778/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java
index e7d52a2..196320d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java
@@ -976,7 +976,7 @@ public class RegionPlacementMaintainer {
     opt.addOption("munkres", false,
         "use munkres to place secondaries and tertiaries");
     opt.addOption("ld", "locality-dispersion", false, "print locality and dispersion " +
-    		"information for current plan");
+        "information for current plan");
     try {
       // Set the log4j
       Logger.getLogger("org.apache.zookeeper").setLevel(Level.ERROR);

http://git-wip-us.apache.org/repos/asf/hbase/blob/b12d5778/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java
index d6f1b67..b03611c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java
@@ -94,7 +94,7 @@ public class SnapshotOfRegionAssignmentFromMeta {
    */
   public void initialize() throws IOException {
     LOG.info("Start to scan the hbase:meta for the current region assignment " +
-		"snappshot");
+      "snappshot");
     // TODO: at some point this code could live in the MetaTableAccessor
     Visitor v = new Visitor() {
       @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/b12d5778/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index a070fd8..db66f5c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -2489,7 +2489,7 @@ public class HRegionServer extends HasThread implements
    * @see org.apache.hadoop.hbase.regionserver.HRegionServerCommandLine
    */
   public static void main(String[] args) throws Exception {
-	VersionInfo.logVersion();
+    VersionInfo.logVersion();
     Configuration conf = HBaseConfiguration.create();
     @SuppressWarnings("unchecked")
     Class<? extends HRegionServer> regionServerClass = (Class<? extends HRegionServer>) conf

http://git-wip-us.apache.org/repos/asf/hbase/blob/b12d5778/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java
index 0052b00..23d034f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java
@@ -112,7 +112,7 @@ public class SplitLogWorker implements Runnable {
                   || cause instanceof ConnectException
                   || cause instanceof SocketTimeoutException)) {
             LOG.warn("log replaying of " + filename + " can't connect to the target regionserver, "
-            		+ "resigning", e);
+                + "resigning", e);
             return Status.RESIGNED;
           } else if (cause instanceof InterruptedException) {
             LOG.warn("log splitting of " + filename + " interrupted, resigning", e);

http://git-wip-us.apache.org/repos/asf/hbase/blob/b12d5778/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/HLogSplitterHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/HLogSplitterHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/HLogSplitterHandler.java
index b0f3f0b..8e2ee62 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/HLogSplitterHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/HLogSplitterHandler.java
@@ -54,7 +54,7 @@ public class HLogSplitterHandler extends EventHandler {
   public HLogSplitterHandler(final Server server, SplitLogWorkerCoordination coordination,
       SplitLogWorkerCoordination.SplitTaskDetails splitDetails, CancelableProgressable reporter,
       AtomicInteger inProgressTasks, TaskExecutor splitTaskExecutor, RecoveryMode mode) {
-	  super(server, EventType.RS_LOG_REPLAY);
+    super(server, EventType.RS_LOG_REPLAY);
     this.splitTaskDetails = splitDetails;
     this.coordination = coordination;
     this.reporter = reporter;

http://git-wip-us.apache.org/repos/asf/hbase/blob/b12d5778/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
index 2a28e11..592e4a6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
@@ -990,7 +990,7 @@ class FSHLog implements HLog, Syncable {
     String prefixPathStr = new Path(fullPathLogDir, logFilePrefix + ".").toString();
     if (!fileName.toString().startsWith(prefixPathStr)) {
       throw new IllegalArgumentException("The log file " + fileName + " doesn't belong to" +
-      		" this regionserver " + prefixPathStr);
+          " this regionserver " + prefixPathStr);
     }
     String chompedPath = fileName.toString().substring(prefixPathStr.length());
     if (forMeta) chompedPath = chompedPath.substring(0, chompedPath.indexOf(META_HLOG_FILE_EXTN));

http://git-wip-us.apache.org/repos/asf/hbase/blob/b12d5778/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index f9b840b..90f5bdf 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -366,8 +366,8 @@ public class HBaseFsck extends Configured {
     if (hbckOutFd == null) {
       setRetCode(-1);
       LOG.error("Another instance of hbck is running, exiting this instance.[If you are sure" +
-		      " no other instance is running, delete the lock file " +
-		      HBCK_LOCK_PATH + " and rerun the tool]");
+          " no other instance is running, delete the lock file " +
+          HBCK_LOCK_PATH + " and rerun the tool]");
       throw new IOException("Duplicate hbck - Abort");
     }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/b12d5778/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionServerTracker.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionServerTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionServerTracker.java
index 025d98e..1eab2d2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionServerTracker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionServerTracker.java
@@ -50,7 +50,7 @@ import org.apache.zookeeper.KeeperException;
 public class RegionServerTracker extends ZooKeeperListener {
   private static final Log LOG = LogFactory.getLog(RegionServerTracker.class);
   private NavigableMap<ServerName, RegionServerInfo> regionServers = 
-		  new TreeMap<ServerName, RegionServerInfo>();
+      new TreeMap<ServerName, RegionServerInfo>();
   private ServerManager serverManager;
   private Server server;
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/b12d5778/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index efe27e2..271401b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -1022,8 +1022,8 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
    * @throws IOException
    */
   public Path getDefaultRootDirPath() throws IOException {
-	FileSystem fs = FileSystem.get(this.conf);
-	return new Path(fs.makeQualified(fs.getHomeDirectory()),"hbase");
+    FileSystem fs = FileSystem.get(this.conf);
+    return new Path(fs.makeQualified(fs.getHomeDirectory()),"hbase");
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/b12d5778/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHTablePool.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHTablePool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHTablePool.java
index 3b31789..a226a8c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHTablePool.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHTablePool.java
@@ -57,75 +57,75 @@ public class TestHTablePool {
 
     protected abstract PoolType getPoolType();
 
-		@Test
-		public void testTableWithStringName() throws Exception {
-			HTablePool pool = new HTablePool(TEST_UTIL.getConfiguration(),
-					Integer.MAX_VALUE, getPoolType());
-			String tableName = TABLENAME;
-
-			// Request a table from an empty pool
-			Table table = pool.getTable(tableName);
-			Assert.assertNotNull(table);
-
-			// Close table (returns table to the pool)
-			table.close();
-
-			// Request a table of the same name
-			Table sameTable = pool.getTable(tableName);
-			Assert.assertSame(
-					((HTablePool.PooledHTable) table).getWrappedTable(),
-					((HTablePool.PooledHTable) sameTable).getWrappedTable());
-		}
-
-		@Test
-		public void testTableWithByteArrayName() throws IOException {
-			HTablePool pool = new HTablePool(TEST_UTIL.getConfiguration(),
-					Integer.MAX_VALUE, getPoolType());
-
-			// Request a table from an empty pool
-			Table table = pool.getTable(TABLENAME);
-			Assert.assertNotNull(table);
-
-			// Close table (returns table to the pool)
-			table.close();
-
-			// Request a table of the same name
-			Table sameTable = pool.getTable(TABLENAME);
-			Assert.assertSame(
-					((HTablePool.PooledHTable) table).getWrappedTable(),
-					((HTablePool.PooledHTable) sameTable).getWrappedTable());
-		}
-
-		@Test
-		public void testTablesWithDifferentNames() throws IOException {
-			HTablePool pool = new HTablePool(TEST_UTIL.getConfiguration(),
-					Integer.MAX_VALUE, getPoolType());
+    @Test
+    public void testTableWithStringName() throws Exception {
+      HTablePool pool = new HTablePool(TEST_UTIL.getConfiguration(),
+          Integer.MAX_VALUE, getPoolType());
+      String tableName = TABLENAME;
+
+      // Request a table from an empty pool
+      Table table = pool.getTable(tableName);
+      Assert.assertNotNull(table);
+
+      // Close table (returns table to the pool)
+      table.close();
+
+      // Request a table of the same name
+      Table sameTable = pool.getTable(tableName);
+      Assert.assertSame(
+          ((HTablePool.PooledHTable) table).getWrappedTable(),
+          ((HTablePool.PooledHTable) sameTable).getWrappedTable());
+    }
+
+    @Test
+    public void testTableWithByteArrayName() throws IOException {
+      HTablePool pool = new HTablePool(TEST_UTIL.getConfiguration(),
+          Integer.MAX_VALUE, getPoolType());
+
+      // Request a table from an empty pool
+      Table table = pool.getTable(TABLENAME);
+      Assert.assertNotNull(table);
+
+      // Close table (returns table to the pool)
+      table.close();
+
+      // Request a table of the same name
+      Table sameTable = pool.getTable(TABLENAME);
+      Assert.assertSame(
+          ((HTablePool.PooledHTable) table).getWrappedTable(),
+          ((HTablePool.PooledHTable) sameTable).getWrappedTable());
+    }
+
+    @Test
+    public void testTablesWithDifferentNames() throws IOException {
+      HTablePool pool = new HTablePool(TEST_UTIL.getConfiguration(),
+          Integer.MAX_VALUE, getPoolType());
       // We add the class to the table name as the HBase cluster is reused
       //  during the tests: this gives naming unicity.
-			byte[] otherTable = Bytes.toBytes(
+      byte[] otherTable = Bytes.toBytes(
         "OtherTable_" + getClass().getSimpleName()
       );
-			TEST_UTIL.createTable(otherTable, HConstants.CATALOG_FAMILY);
-
-			// Request a table from an empty pool
-			Table table1 = pool.getTable(TABLENAME);
-			Table table2 = pool.getTable(otherTable);
-			Assert.assertNotNull(table2);
-
-			// Close tables (returns tables to the pool)
-			table1.close();
-			table2.close();
-
-			// Request tables of the same names
-			Table sameTable1 = pool.getTable(TABLENAME);
-			Table sameTable2 = pool.getTable(otherTable);
-			Assert.assertSame(
-					((HTablePool.PooledHTable) table1).getWrappedTable(),
-					((HTablePool.PooledHTable) sameTable1).getWrappedTable());
-			Assert.assertSame(
-					((HTablePool.PooledHTable) table2).getWrappedTable(),
-					((HTablePool.PooledHTable) sameTable2).getWrappedTable());
-		}
+      TEST_UTIL.createTable(otherTable, HConstants.CATALOG_FAMILY);
+
+      // Request a table from an empty pool
+      Table table1 = pool.getTable(TABLENAME);
+      Table table2 = pool.getTable(otherTable);
+      Assert.assertNotNull(table2);
+
+      // Close tables (returns tables to the pool)
+      table1.close();
+      table2.close();
+
+      // Request tables of the same names
+      Table sameTable1 = pool.getTable(TABLENAME);
+      Table sameTable2 = pool.getTable(otherTable);
+      Assert.assertSame(
+          ((HTablePool.PooledHTable) table1).getWrappedTable(),
+          ((HTablePool.PooledHTable) sameTable1).getWrappedTable());
+      Assert.assertSame(
+          ((HTablePool.PooledHTable) table2).getWrappedTable(),
+          ((HTablePool.PooledHTable) sameTable2).getWrappedTable());
+    }
     @Test
     public void testProxyImplementationReturned() {
       HTablePool pool = new HTablePool(TEST_UTIL.getConfiguration(),
@@ -208,161 +208,159 @@ public class TestHTablePool {
         pool.close();
       }
 
+    }   
+
+  }
+
+  @Category(MediumTests.class)
+  public static class TestHTableReusablePool extends TestHTablePoolType {
+    @Override
+    protected PoolType getPoolType() {
+      return PoolType.Reusable;
     }
 
-   
+    @Test
+    public void testTableWithMaxSize() throws Exception {
+      HTablePool pool = new HTablePool(TEST_UTIL.getConfiguration(), 2,
+          getPoolType());
+
+      // Request tables from an empty pool
+      Table table1 = pool.getTable(TABLENAME);
+      Table table2 = pool.getTable(TABLENAME);
+      Table table3 = pool.getTable(TABLENAME);
+
+      // Close tables (returns tables to the pool)
+      table1.close();
+      table2.close();
+      // The pool should reject this one since it is already full
+      table3.close();
+
+      // Request tables of the same name
+      Table sameTable1 = pool.getTable(TABLENAME);
+      Table sameTable2 = pool.getTable(TABLENAME);
+      Table sameTable3 = pool.getTable(TABLENAME);
+      Assert.assertSame(
+          ((HTablePool.PooledHTable) table1).getWrappedTable(),
+          ((HTablePool.PooledHTable) sameTable1).getWrappedTable());
+      Assert.assertSame(
+          ((HTablePool.PooledHTable) table2).getWrappedTable(),
+          ((HTablePool.PooledHTable) sameTable2).getWrappedTable());
+      Assert.assertNotSame(
+          ((HTablePool.PooledHTable) table3).getWrappedTable(),
+          ((HTablePool.PooledHTable) sameTable3).getWrappedTable());
+    }
+
+    @Test
+    public void testCloseTablePool() throws IOException {
+      HTablePool pool = new HTablePool(TEST_UTIL.getConfiguration(), 4,
+          getPoolType());
+      HBaseAdmin admin = new HBaseAdmin(TEST_UTIL.getConfiguration());
+
+      if (admin.tableExists(TABLENAME)) {
+        admin.disableTable(TABLENAME);
+        admin.deleteTable(TABLENAME);
+      }
+
+      HTableDescriptor tableDescriptor = new HTableDescriptor(TableName.valueOf(TABLENAME));
+      tableDescriptor.addFamily(new HColumnDescriptor("randomFamily"));
+      admin.createTable(tableDescriptor);
+
+      // Request tables from an empty pool
+      Table[] tables = new Table[4];
+      for (int i = 0; i < 4; ++i) {
+        tables[i] = pool.getTable(TABLENAME);
+      }
 
+      pool.closeTablePool(TABLENAME);
+
+      for (int i = 0; i < 4; ++i) {
+        tables[i].close();
+      }
+
+      Assert.assertEquals(4,
+          pool.getCurrentPoolSize(TABLENAME));
+
+      pool.closeTablePool(TABLENAME);
+
+      Assert.assertEquals(0,
+          pool.getCurrentPoolSize(TABLENAME));
+    }
   }
 
   @Category(MediumTests.class)
-	public static class TestHTableReusablePool extends TestHTablePoolType {
-		@Override
-		protected PoolType getPoolType() {
-			return PoolType.Reusable;
-		}
-
-		@Test
-		public void testTableWithMaxSize() throws Exception {
-			HTablePool pool = new HTablePool(TEST_UTIL.getConfiguration(), 2,
-					getPoolType());
-
-			// Request tables from an empty pool
-			Table table1 = pool.getTable(TABLENAME);
-			Table table2 = pool.getTable(TABLENAME);
-			Table table3 = pool.getTable(TABLENAME);
-
-			// Close tables (returns tables to the pool)
-			table1.close();
-			table2.close();
-			// The pool should reject this one since it is already full
-			table3.close();
-
-			// Request tables of the same name
-			Table sameTable1 = pool.getTable(TABLENAME);
-			Table sameTable2 = pool.getTable(TABLENAME);
-			Table sameTable3 = pool.getTable(TABLENAME);
-			Assert.assertSame(
-					((HTablePool.PooledHTable) table1).getWrappedTable(),
-					((HTablePool.PooledHTable) sameTable1).getWrappedTable());
-			Assert.assertSame(
-					((HTablePool.PooledHTable) table2).getWrappedTable(),
-					((HTablePool.PooledHTable) sameTable2).getWrappedTable());
-			Assert.assertNotSame(
-					((HTablePool.PooledHTable) table3).getWrappedTable(),
-					((HTablePool.PooledHTable) sameTable3).getWrappedTable());
-		}
-
-		@Test
-		public void testCloseTablePool() throws IOException {
-			HTablePool pool = new HTablePool(TEST_UTIL.getConfiguration(), 4,
-					getPoolType());
-			HBaseAdmin admin = new HBaseAdmin(TEST_UTIL.getConfiguration());
-
-			if (admin.tableExists(TABLENAME)) {
-				admin.disableTable(TABLENAME);
-				admin.deleteTable(TABLENAME);
-			}
-
-			HTableDescriptor tableDescriptor = new HTableDescriptor(TableName.valueOf(TABLENAME));
-			tableDescriptor.addFamily(new HColumnDescriptor("randomFamily"));
-			admin.createTable(tableDescriptor);
-
-			// Request tables from an empty pool
-			Table[] tables = new Table[4];
-			for (int i = 0; i < 4; ++i) {
-				tables[i] = pool.getTable(TABLENAME);
-			}
-
-			pool.closeTablePool(TABLENAME);
-
-			for (int i = 0; i < 4; ++i) {
-				tables[i].close();
-			}
-
-			Assert.assertEquals(4,
-					pool.getCurrentPoolSize(TABLENAME));
-
-			pool.closeTablePool(TABLENAME);
-
-			Assert.assertEquals(0,
-					pool.getCurrentPoolSize(TABLENAME));
-		}
-	}
+  public static class TestHTableThreadLocalPool extends TestHTablePoolType {
+    @Override
+    protected PoolType getPoolType() {
+      return PoolType.ThreadLocal;
+    }
 
-  @Category(MediumTests.class)
-	public static class TestHTableThreadLocalPool extends TestHTablePoolType {
-		@Override
-		protected PoolType getPoolType() {
-			return PoolType.ThreadLocal;
-		}
-
-		@Test
-		public void testTableWithMaxSize() throws Exception {
-			HTablePool pool = new HTablePool(TEST_UTIL.getConfiguration(), 2,
-					getPoolType());
-
-			// Request tables from an empty pool
-			Table table1 = pool.getTable(TABLENAME);
-			Table table2 = pool.getTable(TABLENAME);
-			Table table3 = pool.getTable(TABLENAME);
-
-			// Close tables (returns tables to the pool)
-			table1.close();
-			table2.close();
-			// The pool should not reject this one since the number of threads
-			// <= 2
-			table3.close();
-
-			// Request tables of the same name
-			Table sameTable1 = pool.getTable(TABLENAME);
-			Table sameTable2 = pool.getTable(TABLENAME);
-			Table sameTable3 = pool.getTable(TABLENAME);
-			Assert.assertSame(
-					((HTablePool.PooledHTable) table3).getWrappedTable(),
-					((HTablePool.PooledHTable) sameTable1).getWrappedTable());
-			Assert.assertSame(
-					((HTablePool.PooledHTable) table3).getWrappedTable(),
-					((HTablePool.PooledHTable) sameTable2).getWrappedTable());
-			Assert.assertSame(
-					((HTablePool.PooledHTable) table3).getWrappedTable(),
-					((HTablePool.PooledHTable) sameTable3).getWrappedTable());
-		}
-
-		@Test
-		public void testCloseTablePool() throws IOException {
-			HTablePool pool = new HTablePool(TEST_UTIL.getConfiguration(), 4,
-					getPoolType());
-			HBaseAdmin admin = new HBaseAdmin(TEST_UTIL.getConfiguration());
-
-			if (admin.tableExists(TABLENAME)) {
-				admin.disableTable(TABLENAME);
-				admin.deleteTable(TABLENAME);
-			}
-
-			HTableDescriptor tableDescriptor = new HTableDescriptor(TableName.valueOf(TABLENAME));
-			tableDescriptor.addFamily(new HColumnDescriptor("randomFamily"));
-			admin.createTable(tableDescriptor);
-
-			// Request tables from an empty pool
-			Table[] tables = new Table[4];
-			for (int i = 0; i < 4; ++i) {
-				tables[i] = pool.getTable(TABLENAME);
-			}
-
-			pool.closeTablePool(TABLENAME);
-
-			for (int i = 0; i < 4; ++i) {
-				tables[i].close();
-			}
-
-			Assert.assertEquals(1,
-					pool.getCurrentPoolSize(TABLENAME));
-
-			pool.closeTablePool(TABLENAME);
-
-			Assert.assertEquals(0,
-					pool.getCurrentPoolSize(TABLENAME));
-		}
-	}
+    @Test
+    public void testTableWithMaxSize() throws Exception {
+      HTablePool pool = new HTablePool(TEST_UTIL.getConfiguration(), 2,
+          getPoolType());
+
+      // Request tables from an empty pool
+      Table table1 = pool.getTable(TABLENAME);
+      Table table2 = pool.getTable(TABLENAME);
+      Table table3 = pool.getTable(TABLENAME);
+
+      // Close tables (returns tables to the pool)
+      table1.close();
+      table2.close();
+      // The pool should not reject this one since the number of threads
+      // <= 2
+      table3.close();
+
+      // Request tables of the same name
+      Table sameTable1 = pool.getTable(TABLENAME);
+      Table sameTable2 = pool.getTable(TABLENAME);
+      Table sameTable3 = pool.getTable(TABLENAME);
+      Assert.assertSame(
+          ((HTablePool.PooledHTable) table3).getWrappedTable(),
+          ((HTablePool.PooledHTable) sameTable1).getWrappedTable());
+      Assert.assertSame(
+          ((HTablePool.PooledHTable) table3).getWrappedTable(),
+          ((HTablePool.PooledHTable) sameTable2).getWrappedTable());
+      Assert.assertSame(
+          ((HTablePool.PooledHTable) table3).getWrappedTable(),
+          ((HTablePool.PooledHTable) sameTable3).getWrappedTable());
+    }
+
+    @Test
+    public void testCloseTablePool() throws IOException {
+      HTablePool pool = new HTablePool(TEST_UTIL.getConfiguration(), 4,
+          getPoolType());
+      HBaseAdmin admin = new HBaseAdmin(TEST_UTIL.getConfiguration());
+
+      if (admin.tableExists(TABLENAME)) {
+        admin.disableTable(TABLENAME);
+        admin.deleteTable(TABLENAME);
+      }
+
+      HTableDescriptor tableDescriptor = new HTableDescriptor(TableName.valueOf(TABLENAME));
+      tableDescriptor.addFamily(new HColumnDescriptor("randomFamily"));
+      admin.createTable(tableDescriptor);
+
+      // Request tables from an empty pool
+      Table[] tables = new Table[4];
+      for (int i = 0; i < 4; ++i) {
+        tables[i] = pool.getTable(TABLENAME);
+      }
+
+      pool.closeTablePool(TABLENAME);
+
+      for (int i = 0; i < 4; ++i) {
+        tables[i].close();
+      }
+
+      Assert.assertEquals(1,
+          pool.getCurrentPoolSize(TABLENAME));
+
+      pool.closeTablePool(TABLENAME);
+
+      Assert.assertEquals(0,
+          pool.getCurrentPoolSize(TABLENAME));
+    }
+  }
 
 }
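
The tests above exercise the HTablePool check-out/return lifecycle: getTable() hands out a pooled wrapper, close() returns it to the pool (retained up to maxSize for the Reusable type), and closeTablePool() discards the pooled handles. A hedged usage sketch under those assumptions; HTablePool was deprecated in later releases, and the PoolType import path is assumed:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTablePool;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.PoolMap.PoolType; // import path assumed

// Hedged sketch of the pool lifecycle these tests exercise; "myTable"
// is a placeholder and must already exist on the cluster.
public class HTablePoolUsageSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HTablePool pool = new HTablePool(conf, 4, PoolType.Reusable);
    Table table = pool.getTable("myTable"); // check a wrapper out of the pool
    try {
      // ... reads and writes against the table ...
    } finally {
      table.close();                        // returns the wrapper to the pool
    }
    pool.closeTablePool("myTable");         // drop pooled handles for this table
    pool.close();
  }
}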

http://git-wip-us.apache.org/repos/asf/hbase/blob/b12d5778/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestBigDecimalColumnInterpreter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestBigDecimalColumnInterpreter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestBigDecimalColumnInterpreter.java
index 06773bd..c18444a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestBigDecimalColumnInterpreter.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestBigDecimalColumnInterpreter.java
@@ -133,7 +133,7 @@ public class TestBigDecimalColumnInterpreter {
     Scan scan = new Scan();
     scan.addColumn(TEST_FAMILY, TEST_QUALIFIER);
     final ColumnInterpreter<BigDecimal, BigDecimal, EmptyMsg, BigDecimalMsg, BigDecimalMsg> ci =
-	    new BigDecimalColumnInterpreter();
+      new BigDecimalColumnInterpreter();
     BigDecimal median = aClient.median(TEST_TABLE, ci, scan);
     assertEquals(new BigDecimal("8.00"), median);
   }
@@ -152,7 +152,7 @@ public class TestBigDecimalColumnInterpreter {
     Scan scan = new Scan();
     scan.addColumn(TEST_FAMILY, TEST_QUALIFIER);
     final ColumnInterpreter<BigDecimal, BigDecimal, EmptyMsg, BigDecimalMsg, BigDecimalMsg> ci =
-	    new BigDecimalColumnInterpreter();
+      new BigDecimalColumnInterpreter();
     BigDecimal maximum = aClient.max(TEST_TABLE, ci, scan);
     assertEquals(new BigDecimal("19.00"), maximum);
   }
@@ -201,7 +201,7 @@ public class TestBigDecimalColumnInterpreter {
   public void testMaxWithValidRangeWithNullCF() {
     AggregationClient aClient = new AggregationClient(conf);
     final ColumnInterpreter<BigDecimal, BigDecimal, EmptyMsg, BigDecimalMsg, BigDecimalMsg> ci =
-	    new BigDecimalColumnInterpreter();
+      new BigDecimalColumnInterpreter();
     Scan scan = new Scan();
     BigDecimal max = null;
     try {
@@ -217,7 +217,7 @@ public class TestBigDecimalColumnInterpreter {
   public void testMaxWithInvalidRange() {
     AggregationClient aClient = new AggregationClient(conf);
     final ColumnInterpreter<BigDecimal, BigDecimal, EmptyMsg, BigDecimalMsg, BigDecimalMsg> ci =
-	    new BigDecimalColumnInterpreter();
+      new BigDecimalColumnInterpreter();
     Scan scan = new Scan();
     scan.setStartRow(ROWS[4]);
     scan.setStopRow(ROWS[2]);
@@ -242,7 +242,7 @@ public class TestBigDecimalColumnInterpreter {
     try {
       AggregationClient aClient = new AggregationClient(conf);
       final ColumnInterpreter<BigDecimal, BigDecimal, EmptyMsg, BigDecimalMsg, BigDecimalMsg> ci =
-	      new BigDecimalColumnInterpreter();
+        new BigDecimalColumnInterpreter();
       max = aClient.max(TEST_TABLE, ci, scan);
     } catch (Exception e) {
       max = BigDecimal.ZERO;
@@ -259,7 +259,7 @@ public class TestBigDecimalColumnInterpreter {
     Filter f = new PrefixFilter(Bytes.toBytes("foo:bar"));
     scan.setFilter(f);
     final ColumnInterpreter<BigDecimal, BigDecimal, EmptyMsg, BigDecimalMsg, BigDecimalMsg> ci =
-	    new BigDecimalColumnInterpreter();
+      new BigDecimalColumnInterpreter();
     max = aClient.max(TEST_TABLE, ci, scan);
     assertEquals(null, max);
   }
@@ -279,7 +279,7 @@ public class TestBigDecimalColumnInterpreter {
     scan.setStartRow(HConstants.EMPTY_START_ROW);
     scan.setStopRow(HConstants.EMPTY_END_ROW);
     final ColumnInterpreter<BigDecimal, BigDecimal, EmptyMsg, BigDecimalMsg, BigDecimalMsg> ci =
-	    new BigDecimalColumnInterpreter();
+      new BigDecimalColumnInterpreter();
     BigDecimal min = aClient.min(TEST_TABLE, ci, scan);
     assertEquals(new BigDecimal("0.00"), min);
   }
@@ -295,7 +295,7 @@ public class TestBigDecimalColumnInterpreter {
     scan.setStartRow(ROWS[5]);
     scan.setStopRow(ROWS[15]);
     final ColumnInterpreter<BigDecimal, BigDecimal, EmptyMsg, BigDecimalMsg, BigDecimalMsg> ci =
-	    new BigDecimalColumnInterpreter();
+      new BigDecimalColumnInterpreter();
     BigDecimal min = aClient.min(TEST_TABLE, ci, scan);
     assertEquals(new BigDecimal("5.00"), min);
   }
@@ -308,7 +308,7 @@ public class TestBigDecimalColumnInterpreter {
     scan.setStartRow(HConstants.EMPTY_START_ROW);
     scan.setStopRow(HConstants.EMPTY_END_ROW);
     final ColumnInterpreter<BigDecimal, BigDecimal, EmptyMsg, BigDecimalMsg, BigDecimalMsg> ci =
-	    new BigDecimalColumnInterpreter();
+      new BigDecimalColumnInterpreter();
     BigDecimal min = aClient.min(TEST_TABLE, ci, scan);
     assertEquals(new BigDecimal("0.00"), min);
   }
@@ -321,7 +321,7 @@ public class TestBigDecimalColumnInterpreter {
     scan.setStartRow(ROWS[6]);
     scan.setStopRow(ROWS[7]);
     final ColumnInterpreter<BigDecimal, BigDecimal, EmptyMsg, BigDecimalMsg, BigDecimalMsg> ci =
-	    new BigDecimalColumnInterpreter();
+      new BigDecimalColumnInterpreter();
     BigDecimal min = aClient.min(TEST_TABLE, ci, scan);
     assertEquals(new BigDecimal("0.60"), min);
   }
@@ -333,7 +333,7 @@ public class TestBigDecimalColumnInterpreter {
     scan.setStartRow(ROWS[5]);
     scan.setStopRow(ROWS[15]);
     final ColumnInterpreter<BigDecimal, BigDecimal, EmptyMsg, BigDecimalMsg, BigDecimalMsg> ci =
-	    new BigDecimalColumnInterpreter();
+      new BigDecimalColumnInterpreter();
     BigDecimal min = null;
     try {
       min = aClient.min(TEST_TABLE, ci, scan);
@@ -352,7 +352,7 @@ public class TestBigDecimalColumnInterpreter {
     scan.setStartRow(ROWS[4]);
     scan.setStopRow(ROWS[2]);
     final ColumnInterpreter<BigDecimal, BigDecimal, EmptyMsg, BigDecimalMsg, BigDecimalMsg> ci =
-	    new BigDecimalColumnInterpreter();
+      new BigDecimalColumnInterpreter();
     try {
       min = aClient.min(TEST_TABLE, ci, scan);
     } catch (Throwable e) {
@@ -368,7 +368,7 @@ public class TestBigDecimalColumnInterpreter {
     scan.setStartRow(ROWS[6]);
     scan.setStopRow(ROWS[6]);
     final ColumnInterpreter<BigDecimal, BigDecimal, EmptyMsg, BigDecimalMsg, BigDecimalMsg> ci =
-	    new BigDecimalColumnInterpreter();
+      new BigDecimalColumnInterpreter();
     BigDecimal min = null;
     try {
       min = aClient.min(TEST_TABLE, ci, scan);
@@ -385,7 +385,7 @@ public class TestBigDecimalColumnInterpreter {
     Filter f = new PrefixFilter(Bytes.toBytes("foo:bar"));
     scan.setFilter(f);
     final ColumnInterpreter<BigDecimal, BigDecimal, EmptyMsg, BigDecimalMsg, BigDecimalMsg> ci =
-	    new BigDecimalColumnInterpreter();
+      new BigDecimalColumnInterpreter();
     BigDecimal min = null;
     min = aClient.min(TEST_TABLE, ci, scan);
     assertEquals(null, min);
@@ -403,7 +403,7 @@ public class TestBigDecimalColumnInterpreter {
     Scan scan = new Scan();
     scan.addColumn(TEST_FAMILY, TEST_QUALIFIER);
     final ColumnInterpreter<BigDecimal, BigDecimal, EmptyMsg, BigDecimalMsg, BigDecimalMsg> ci =
-	    new BigDecimalColumnInterpreter();
+      new BigDecimalColumnInterpreter();
     BigDecimal sum = aClient.sum(TEST_TABLE, ci, scan);
     assertEquals(new BigDecimal("190.00"), sum);
   }
@@ -419,7 +419,7 @@ public class TestBigDecimalColumnInterpreter {
     scan.setStartRow(ROWS[5]);
     scan.setStopRow(ROWS[15]);
     final ColumnInterpreter<BigDecimal, BigDecimal, EmptyMsg, BigDecimalMsg, BigDecimalMsg> ci =
-	    new BigDecimalColumnInterpreter();
+      new BigDecimalColumnInterpreter();
     BigDecimal sum = aClient.sum(TEST_TABLE, ci, scan);
     assertEquals(new BigDecimal("95.00"), sum);
   }
@@ -430,7 +430,7 @@ public class TestBigDecimalColumnInterpreter {
     Scan scan = new Scan();
     scan.addFamily(TEST_FAMILY);
     final ColumnInterpreter<BigDecimal, BigDecimal, EmptyMsg, BigDecimalMsg, BigDecimalMsg> ci =
-	    new BigDecimalColumnInterpreter();
+      new BigDecimalColumnInterpreter();
     BigDecimal sum = aClient.sum(TEST_TABLE, ci, scan);
     assertEquals(new BigDecimal("209.00"), sum); // 190 + 19
   }
@@ -443,7 +443,7 @@ public class TestBigDecimalColumnInterpreter {
     scan.setStartRow(ROWS[6]);
     scan.setStopRow(ROWS[7]);
     final ColumnInterpreter<BigDecimal, BigDecimal, EmptyMsg, BigDecimalMsg, BigDecimalMsg> ci =
-	    new BigDecimalColumnInterpreter();
+      new BigDecimalColumnInterpreter();
     BigDecimal sum = aClient.sum(TEST_TABLE, ci, scan);
     assertEquals(new BigDecimal("6.60"), sum); // 6 + 60
   }
@@ -455,7 +455,7 @@ public class TestBigDecimalColumnInterpreter {
     scan.setStartRow(ROWS[6]);
     scan.setStopRow(ROWS[7]);
     final ColumnInterpreter<BigDecimal, BigDecimal, EmptyMsg, BigDecimalMsg, BigDecimalMsg> ci =
-	    new BigDecimalColumnInterpreter();
+      new BigDecimalColumnInterpreter();
     BigDecimal sum = null;
     try {
       sum = aClient.sum(TEST_TABLE, ci, scan);
@@ -473,7 +473,7 @@ public class TestBigDecimalColumnInterpreter {
     scan.setStartRow(ROWS[6]);
     scan.setStopRow(ROWS[2]);
     final ColumnInterpreter<BigDecimal, BigDecimal, EmptyMsg, BigDecimalMsg, BigDecimalMsg> ci =
-	    new BigDecimalColumnInterpreter();
+      new BigDecimalColumnInterpreter();
     BigDecimal sum = null;
     try {
       sum = aClient.sum(TEST_TABLE, ci, scan);
@@ -490,7 +490,7 @@ public class TestBigDecimalColumnInterpreter {
     scan.addFamily(TEST_FAMILY);
     scan.setFilter(f);
     final ColumnInterpreter<BigDecimal, BigDecimal, EmptyMsg, BigDecimalMsg, BigDecimalMsg> ci =
-	    new BigDecimalColumnInterpreter();
+      new BigDecimalColumnInterpreter();
     BigDecimal sum = null;
     sum = aClient.sum(TEST_TABLE, ci, scan);
     assertEquals(null, sum);
@@ -508,7 +508,7 @@ public class TestBigDecimalColumnInterpreter {
     Scan scan = new Scan();
     scan.addColumn(TEST_FAMILY, TEST_QUALIFIER);
     final ColumnInterpreter<BigDecimal, BigDecimal, EmptyMsg, BigDecimalMsg, BigDecimalMsg> ci =
-	    new BigDecimalColumnInterpreter();
+      new BigDecimalColumnInterpreter();
     double avg = aClient.avg(TEST_TABLE, ci, scan);
     assertEquals(9.5, avg, 0);
   }
@@ -524,7 +524,7 @@ public class TestBigDecimalColumnInterpreter {
     scan.setStartRow(ROWS[5]);
     scan.setStopRow(ROWS[15]);
     final ColumnInterpreter<BigDecimal, BigDecimal, EmptyMsg, BigDecimalMsg, BigDecimalMsg> ci =
-	    new BigDecimalColumnInterpreter();
+      new BigDecimalColumnInterpreter();
     double avg = aClient.avg(TEST_TABLE, ci, scan);
     assertEquals(9.5, avg, 0);
   }
@@ -535,7 +535,7 @@ public class TestBigDecimalColumnInterpreter {
     Scan scan = new Scan();
     scan.addFamily(TEST_FAMILY);
     final ColumnInterpreter<BigDecimal, BigDecimal, EmptyMsg, BigDecimalMsg, BigDecimalMsg> ci =
-	    new BigDecimalColumnInterpreter();
+      new BigDecimalColumnInterpreter();
     double avg = aClient.avg(TEST_TABLE, ci, scan);
     assertEquals(10.45, avg, 0.01);
   }
@@ -548,7 +548,7 @@ public class TestBigDecimalColumnInterpreter {
     scan.setStartRow(ROWS[6]);
     scan.setStopRow(ROWS[7]);
     final ColumnInterpreter<BigDecimal, BigDecimal, EmptyMsg, BigDecimalMsg, BigDecimalMsg> ci =
-	    new BigDecimalColumnInterpreter();
+      new BigDecimalColumnInterpreter();
     double avg = aClient.avg(TEST_TABLE, ci, scan);
     assertEquals(6 + 0.60, avg, 0);
   }
@@ -558,7 +558,7 @@ public class TestBigDecimalColumnInterpreter {
     AggregationClient aClient = new AggregationClient(conf);
     Scan scan = new Scan();
     final ColumnInterpreter<BigDecimal, BigDecimal, EmptyMsg, BigDecimalMsg, BigDecimalMsg> ci =
-	    new BigDecimalColumnInterpreter();
+      new BigDecimalColumnInterpreter();
     Double avg = null;
     try {
       avg = aClient.avg(TEST_TABLE, ci, scan);
@@ -576,7 +576,7 @@ public class TestBigDecimalColumnInterpreter {
     scan.setStartRow(ROWS[5]);
     scan.setStopRow(ROWS[1]);
     final ColumnInterpreter<BigDecimal, BigDecimal, EmptyMsg, BigDecimalMsg, BigDecimalMsg> ci =
-	    new BigDecimalColumnInterpreter();
+      new BigDecimalColumnInterpreter();
     Double avg = null;
     try {
       avg = aClient.avg(TEST_TABLE, ci, scan);
@@ -593,7 +593,7 @@ public class TestBigDecimalColumnInterpreter {
     Filter f = new PrefixFilter(Bytes.toBytes("foo:bar"));
     scan.setFilter(f);
     final ColumnInterpreter<BigDecimal, BigDecimal, EmptyMsg, BigDecimalMsg, BigDecimalMsg> ci =
-	    new BigDecimalColumnInterpreter();
+      new BigDecimalColumnInterpreter();
     Double avg = null;
     avg = aClient.avg(TEST_TABLE, ci, scan);
     assertEquals(Double.NaN, avg, 0);
@@ -611,7 +611,7 @@ public class TestBigDecimalColumnInterpreter {
     Scan scan = new Scan();
     scan.addColumn(TEST_FAMILY, TEST_QUALIFIER);
     final ColumnInterpreter<BigDecimal, BigDecimal, EmptyMsg, BigDecimalMsg, BigDecimalMsg> ci =
-	    new BigDecimalColumnInterpreter();
+      new BigDecimalColumnInterpreter();
     double std = aClient.std(TEST_TABLE, ci, scan);
     assertEquals(5.766, std, 0.05d);
   }
@@ -628,7 +628,7 @@ public class TestBigDecimalColumnInterpreter {
     scan.setStartRow(ROWS[5]);
     scan.setStopRow(ROWS[15]);
     final ColumnInterpreter<BigDecimal, BigDecimal, EmptyMsg, BigDecimalMsg, BigDecimalMsg> ci =
-	    new BigDecimalColumnInterpreter();
+      new BigDecimalColumnInterpreter();
     double std = aClient.std(TEST_TABLE, ci, scan);
     assertEquals(2.87, std, 0.05d);
   }
@@ -643,7 +643,7 @@ public class TestBigDecimalColumnInterpreter {
     Scan scan = new Scan();
     scan.addFamily(TEST_FAMILY);
     final ColumnInterpreter<BigDecimal, BigDecimal, EmptyMsg, BigDecimalMsg, BigDecimalMsg> ci =
-	    new BigDecimalColumnInterpreter();
+      new BigDecimalColumnInterpreter();
     double std = aClient.std(TEST_TABLE, ci, scan);
     assertEquals(6.342, std, 0.05d);
   }
@@ -656,7 +656,7 @@ public class TestBigDecimalColumnInterpreter {
     scan.setStartRow(ROWS[6]);
     scan.setStopRow(ROWS[7]);
     final ColumnInterpreter<BigDecimal, BigDecimal, EmptyMsg, BigDecimalMsg, BigDecimalMsg> ci =
-	    new BigDecimalColumnInterpreter();
+      new BigDecimalColumnInterpreter();
     double std = aClient.std(TEST_TABLE, ci, scan);
     System.out.println("std is:" + std);
     assertEquals(0, std, 0.05d);
@@ -669,7 +669,7 @@ public class TestBigDecimalColumnInterpreter {
     scan.setStartRow(ROWS[6]);
     scan.setStopRow(ROWS[17]);
     final ColumnInterpreter<BigDecimal, BigDecimal, EmptyMsg, BigDecimalMsg, BigDecimalMsg> ci =
-	    new BigDecimalColumnInterpreter();
+      new BigDecimalColumnInterpreter();
     Double std = null;
     try {
       std = aClient.std(TEST_TABLE, ci, scan);
@@ -687,7 +687,7 @@ public class TestBigDecimalColumnInterpreter {
     scan.setStartRow(ROWS[6]);
     scan.setStopRow(ROWS[1]);
     final ColumnInterpreter<BigDecimal, BigDecimal, EmptyMsg, BigDecimalMsg, BigDecimalMsg> ci =
-	    new BigDecimalColumnInterpreter();
+      new BigDecimalColumnInterpreter();
     Double std = null;
     try {
       std = aClient.std(TEST_TABLE, ci, scan);

http://git-wip-us.apache.org/repos/asf/hbase/blob/b12d5778/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java
index 757420c..110a5b2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java
@@ -48,18 +48,18 @@ import org.junit.experimental.categories.Category;
 public class TestDependentColumnFilter {
   private final Log LOG = LogFactory.getLog(this.getClass());
   private static final byte[][] ROWS = {
-	  Bytes.toBytes("test1"),Bytes.toBytes("test2")
+    Bytes.toBytes("test1"),Bytes.toBytes("test2")
   };
   private static final byte[][] FAMILIES = {
-	  Bytes.toBytes("familyOne"),Bytes.toBytes("familyTwo")
+    Bytes.toBytes("familyOne"),Bytes.toBytes("familyTwo")
   };
   private static final long STAMP_BASE = System.currentTimeMillis();
   private static final long[] STAMPS = {
-	  STAMP_BASE-100, STAMP_BASE-200, STAMP_BASE-300
+    STAMP_BASE-100, STAMP_BASE-200, STAMP_BASE-300
   };
   private static final byte[] QUALIFIER = Bytes.toBytes("qualifier");
   private static final byte[][] BAD_VALS = {
-	  Bytes.toBytes("bad1"), Bytes.toBytes("bad2"), Bytes.toBytes("bad3")
+    Bytes.toBytes("bad1"), Bytes.toBytes("bad2"), Bytes.toBytes("bad3")
   };
   private static final byte[] MATCH_VAL = Bytes.toBytes("match");
   private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
@@ -117,14 +117,14 @@ public class TestDependentColumnFilter {
   }
 
   private List<KeyValue> makeTestVals() {
-	List<KeyValue> testVals = new ArrayList<KeyValue>();
-	testVals.add(new KeyValue(ROWS[0], FAMILIES[0], QUALIFIER, STAMPS[0], BAD_VALS[0]));
-	testVals.add(new KeyValue(ROWS[0], FAMILIES[0], QUALIFIER, STAMPS[1], BAD_VALS[1]));
-	testVals.add(new KeyValue(ROWS[0], FAMILIES[1], QUALIFIER, STAMPS[1], BAD_VALS[2]));
-	testVals.add(new KeyValue(ROWS[0], FAMILIES[1], QUALIFIER, STAMPS[0], MATCH_VAL));
-	testVals.add(new KeyValue(ROWS[0], FAMILIES[1], QUALIFIER, STAMPS[2], BAD_VALS[2]));
-
-	return testVals;
+    List<KeyValue> testVals = new ArrayList<KeyValue>();
+    testVals.add(new KeyValue(ROWS[0], FAMILIES[0], QUALIFIER, STAMPS[0], BAD_VALS[0]));
+    testVals.add(new KeyValue(ROWS[0], FAMILIES[0], QUALIFIER, STAMPS[1], BAD_VALS[1]));
+    testVals.add(new KeyValue(ROWS[0], FAMILIES[1], QUALIFIER, STAMPS[1], BAD_VALS[2]));
+    testVals.add(new KeyValue(ROWS[0], FAMILIES[1], QUALIFIER, STAMPS[0], MATCH_VAL));
+    testVals.add(new KeyValue(ROWS[0], FAMILIES[1], QUALIFIER, STAMPS[2], BAD_VALS[2]));
+
+    return testVals;
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/b12d5778/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFilePerformance.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFilePerformance.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFilePerformance.java
index 8494578..c152deb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFilePerformance.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFilePerformance.java
@@ -175,68 +175,68 @@ public class TestHFilePerformance extends AbstractHBaseTool {
     FSDataOutputStream fout =  createFSOutput(path);
 
     if ("HFile".equals(fileType)){
-        HFileContextBuilder builder = new HFileContextBuilder()
-	    .withCompression(AbstractHFileWriter.compressionByName(codecName))
-	    .withBlockSize(minBlockSize);
-        if (cipherName != "none") {
-          byte[] cipherKey = new byte[AES.KEY_LENGTH];
-          new SecureRandom().nextBytes(cipherKey);
-          builder.withEncryptionContext(
-            Encryption.newContext(conf)
-              .setCipher(Encryption.getCipher(conf, cipherName))
-              .setKey(cipherKey));
-        }
-        HFileContext context = builder.build();
-        System.out.println("HFile write method: ");
-        HFile.Writer writer = HFile.getWriterFactoryNoCache(conf)
-            .withOutputStream(fout)
-            .withFileContext(context)
-            .withComparator(new KeyValue.RawBytesComparator())
-            .create();
-
-        // Writing value in one shot.
-        for (long l=0; l<rows; l++ ) {
-          generator.getKey(key);
-          generator.getValue(value);
-          writer.append(CellUtil.createCell(key, value));
-          totalBytesWritten += key.length;
-          totalBytesWritten += value.length;
-         }
-        writer.close();
+      HFileContextBuilder builder = new HFileContextBuilder()
+        .withCompression(AbstractHFileWriter.compressionByName(codecName))
+        .withBlockSize(minBlockSize);
+      if (cipherName != "none") {
+        byte[] cipherKey = new byte[AES.KEY_LENGTH];
+        new SecureRandom().nextBytes(cipherKey);
+        builder.withEncryptionContext(
+          Encryption.newContext(conf)
+            .setCipher(Encryption.getCipher(conf, cipherName))
+            .setKey(cipherKey));
+      }
+      HFileContext context = builder.build();
+      System.out.println("HFile write method: ");
+      HFile.Writer writer = HFile.getWriterFactoryNoCache(conf)
+          .withOutputStream(fout)
+          .withFileContext(context)
+          .withComparator(new KeyValue.RawBytesComparator())
+          .create();
+
+      // Writing value in one shot.
+      for (long l=0; l<rows; l++ ) {
+        generator.getKey(key);
+        generator.getValue(value);
+        writer.append(CellUtil.createCell(key, value));
+        totalBytesWritten += key.length;
+        totalBytesWritten += value.length;
+      }
+      writer.close();
     } else if ("SequenceFile".equals(fileType)){
-        CompressionCodec codec = null;
-        if ("gz".equals(codecName))
-          codec = new GzipCodec();
-        else if (!"none".equals(codecName))
-          throw new IOException("Codec not supported.");
-
-        SequenceFile.Writer writer;
-
-        //TODO
-        //JobConf conf = new JobConf();
-
-        if (!"none".equals(codecName))
-          writer = SequenceFile.createWriter(conf, fout, BytesWritable.class,
-            BytesWritable.class, SequenceFile.CompressionType.BLOCK, codec);
-        else
-          writer = SequenceFile.createWriter(conf, fout, BytesWritable.class,
-            BytesWritable.class, SequenceFile.CompressionType.NONE, null);
-
-        BytesWritable keyBsw;
-        BytesWritable valBsw;
-        for (long l=0; l<rows; l++ ) {
-
-           generator.getKey(key);
-           keyBsw = new BytesWritable(key);
-           totalBytesWritten += keyBsw.getSize();
-
-           generator.getValue(value);
-           valBsw = new BytesWritable(value);
-           writer.append(keyBsw, valBsw);
-           totalBytesWritten += valBsw.getSize();
-        }
-
-        writer.close();
+      CompressionCodec codec = null;
+      if ("gz".equals(codecName))
+        codec = new GzipCodec();
+      else if (!"none".equals(codecName))
+        throw new IOException("Codec not supported.");
+
+      SequenceFile.Writer writer;
+
+      //TODO
+      //JobConf conf = new JobConf();
+
+      if (!"none".equals(codecName))
+        writer = SequenceFile.createWriter(conf, fout, BytesWritable.class,
+          BytesWritable.class, SequenceFile.CompressionType.BLOCK, codec);
+      else
+        writer = SequenceFile.createWriter(conf, fout, BytesWritable.class,
+          BytesWritable.class, SequenceFile.CompressionType.NONE, null);
+
+      BytesWritable keyBsw;
+      BytesWritable valBsw;
+      for (long l=0; l<rows; l++ ) {
+
+        generator.getKey(key);
+        keyBsw = new BytesWritable(key);
+        totalBytesWritten += keyBsw.getSize();
+
+        generator.getValue(value);
+        valBsw = new BytesWritable(value);
+        writer.append(keyBsw, valBsw);
+        totalBytesWritten += valBsw.getSize();
+      }
+
+      writer.close();
     } else
        throw new IOException("File Type is not supported");
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/b12d5778/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java
index d676147..8f56190 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java
@@ -103,7 +103,7 @@ public class TestClockSkewDetection {
     long warningSkew = c.getLong("hbase.master.warningclockskew", 1000);
 
     try {
-    	//Master Time > Region Server Time
+      //Master Time > Region Server Time
       LOG.debug("Test: Master Time > Region Server Time");
       LOG.debug("regionServerStartup 2");
       InetAddress ia2 = InetAddress.getLocalHost();

http://git-wip-us.apache.org/repos/asf/hbase/blob/b12d5778/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java
index 262e421..0d8c164 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java
@@ -397,7 +397,7 @@ public class TestMajorCompaction {
   private void createSmallerStoreFile(final HRegion region) throws IOException {
     HRegionIncommon loader = new HRegionIncommon(region);
     HBaseTestCase.addContent(loader, Bytes.toString(COLUMN_FAMILY), ("" +
-    		"bbb").getBytes(), null);
+        "bbb").getBytes(), null);
     loader.flushcache();
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/b12d5778/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
index ba1b1bb..4e488a3 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
@@ -1226,7 +1226,7 @@ public class TestSplitTransactionOnCluster {
     cluster.abortMaster(0);
     cluster.waitOnMaster(0);
     cluster.getConfiguration().setClass(HConstants.MASTER_IMPL,
-    		MockMasterWithoutCatalogJanitor.class, HMaster.class);
+        MockMasterWithoutCatalogJanitor.class, HMaster.class);
     MockMasterWithoutCatalogJanitor master = null;
     master = (MockMasterWithoutCatalogJanitor) cluster.startMaster().getMaster();
     cluster.waitForActiveAndReadyMaster();

http://git-wip-us.apache.org/repos/asf/hbase/blob/b12d5778/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTags.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTags.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTags.java
index a36c6f0..14bd9d6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTags.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTags.java
@@ -286,11 +286,11 @@ public class TestTags {
         put1.add(fam, qual, HConstants.LATEST_TIMESTAMP, value1);
         table.put(put1);
         admin.flush(tableName);
-	// We are lacking an API for confirming flush request compaction.
-	// Just sleep for a short time. We won't be able to confirm flush
-	// completion but the test won't hang now or in the future if
-	// default compaction policy causes compaction between flush and
-	// when we go to confirm it.
+        // We are lacking an API for confirming flush request compaction.
+        // Just sleep for a short time. We won't be able to confirm flush
+        // completion but the test won't hang now or in the future if
+        // default compaction policy causes compaction between flush and
+        // when we go to confirm it.
         Thread.sleep(1000);
 
         put1 = new Put(row2);

http://git-wip-us.apache.org/repos/asf/hbase/blob/b12d5778/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java
index ca5be65..fdc89ab 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java
@@ -510,7 +510,7 @@ public class TestReplicationSmallTests extends TestReplicationBase {
    */
   @Test(timeout = 300000)
   public void testVerifyListReplicatedTable() throws Exception {
-	LOG.info("testVerifyListReplicatedTable");
+    LOG.info("testVerifyListReplicatedTable");
 
     final String tName = "VerifyListReplicated_";
     final String colFam = "cf1";

http://git-wip-us.apache.org/repos/asf/hbase/blob/b12d5778/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java
index a3a7800..45ddddb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java
@@ -121,7 +121,7 @@ public class LoadTestTool extends AbstractHBaseTool {
 
   public static final String OPT_INMEMORY = "in_memory";
   public static final String OPT_USAGE_IN_MEMORY = "Tries to keep the HFiles of the CF " +
-  		"inmemory as far as possible.  Not guaranteed that reads are always served from inmemory";
+      "inmemory as far as possible.  Not guaranteed that reads are always served from inmemory";
 
   public static final String OPT_GENERATOR = "generator";
   public static final String OPT_GENERATOR_USAGE = "The class which generates load for the tool."

http://git-wip-us.apache.org/repos/asf/hbase/blob/b12d5778/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReader.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReader.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReader.java
index c47c328..ca06e97 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReader.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReader.java
@@ -379,9 +379,9 @@ public class MultiThreadedReader extends MultiThreadedAction
           numKeysVerified.incrementAndGet();
         }
       } else {
-		HRegionLocation hloc = connection.getRegionLocation(tableName,
-		    get.getRow(), false);
-         String rowKey = Bytes.toString(get.getRow());
+        HRegionLocation hloc = connection.getRegionLocation(tableName,
+          get.getRow(), false);
+        String rowKey = Bytes.toString(get.getRow());
         LOG.info("Key = " + rowKey + ", Region location: " + hloc);
         if(isNullExpected) {
           nullResult.incrementAndGet();

http://git-wip-us.apache.org/repos/asf/hbase/blob/b12d5778/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedUpdater.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedUpdater.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedUpdater.java
index 464f44b..c61bd78 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedUpdater.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedUpdater.java
@@ -300,7 +300,7 @@ public class MultiThreadedUpdater extends MultiThreadedWriterBase {
       } catch (IOException e) {
         if (ignoreNonceConflicts && (e instanceof OperationConflictException)) {
           LOG.info("Detected nonce conflict, ignoring: " + e.getMessage());
-	  totalOpTimeMs.addAndGet(System.currentTimeMillis() - start);
+          totalOpTimeMs.addAndGet(System.currentTimeMillis() - start);
           return;
         }
         failedKeySet.add(keyBase);

