hbase-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From oct...@apache.org
Subject [1/2] hbase git commit: HBASE-12990 MetaScanner should be replaced by MetaTableAccessor
Date Mon, 09 Mar 2015 10:40:13 GMT
Repository: hbase
Updated Branches:
  refs/heads/master 0fba20471 -> 948746ce4


http://git-wip-us.apache.org/repos/asf/hbase/blob/948746ce/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaScanner.java
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaScanner.java
deleted file mode 100644
index e195baf..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaScanner.java
+++ /dev/null
@@ -1,243 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.client;
-
-import static org.mockito.Matchers.anyObject;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.reset;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-
-import java.math.BigDecimal;
-import java.util.List;
-import java.util.NavigableMap;
-import java.util.Random;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.MetaTableAccessor;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.testclassification.ClientTests;
-import org.apache.hadoop.hbase.testclassification.MediumTests;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.StoppableImplementation;
-import org.apache.hadoop.hbase.util.Threads;
-import org.apache.hadoop.util.StringUtils;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-@Category({MediumTests.class, ClientTests.class})
-public class TestMetaScanner {
-  final Log LOG = LogFactory.getLog(getClass());
-  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
-  private Connection connection;
-
-  public void setUp() throws Exception {
-    TEST_UTIL.startMiniCluster(1);
-    this.connection = TEST_UTIL.getConnection();
-  }
-
-  @After
-  public void tearDown() throws Exception {
-    TEST_UTIL.shutdownMiniCluster();
-  }
-
-  @Test
-  public void testMetaScanner() throws Exception {
-    LOG.info("Starting testMetaScanner");
-
-    setUp();
-    final TableName TABLENAME = TableName.valueOf("testMetaScanner");
-    final byte[] FAMILY = Bytes.toBytes("family");
-    final byte[][] SPLIT_KEYS =
-        new byte[][] { Bytes.toBytes("region_a"), Bytes.toBytes("region_b") };
-
-    TEST_UTIL.createTable(TABLENAME, FAMILY, SPLIT_KEYS);
-    HTable table = (HTable) connection.getTable(TABLENAME);
-    // Make sure all the regions are deployed
-    TEST_UTIL.countRows(table);
-
-    MetaScanner.MetaScannerVisitor visitor =
-      mock(MetaScanner.MetaScannerVisitor.class);
-    doReturn(true).when(visitor).processRow((Result)anyObject());
-
-    // Scanning the entire table should give us three rows
-    MetaScanner.metaScan(connection, visitor, TABLENAME);
-    verify(visitor, times(3)).processRow((Result)anyObject());
-
-    // Scanning the table with a specified empty start row should also
-    // give us three hbase:meta rows
-    reset(visitor);
-    doReturn(true).when(visitor).processRow((Result)anyObject());
-    MetaScanner.metaScan(connection, visitor, TABLENAME, HConstants.EMPTY_BYTE_ARRAY, 1000);
-    verify(visitor, times(3)).processRow((Result)anyObject());
-
-    // Scanning the table starting in the middle should give us two rows:
-    // region_a and region_b
-    reset(visitor);
-    doReturn(true).when(visitor).processRow((Result)anyObject());
-    MetaScanner.metaScan(connection, visitor, TABLENAME, Bytes.toBytes("region_ac"), 1000);
-    verify(visitor, times(2)).processRow((Result)anyObject());
-
-    // Scanning with a limit of 1 should only give us one row
-    reset(visitor);
-    doReturn(true).when(visitor).processRow((Result) anyObject());
-    MetaScanner.metaScan(connection, visitor, TABLENAME, Bytes.toBytes("region_ac"), 1);
-    verify(visitor, times(1)).processRow((Result) anyObject());
-    table.close();
-  }
-
-  @Test
-  public void testConcurrentMetaScannerAndCatalogJanitor() throws Throwable {
-    /* TEST PLAN: start with only one region in a table. Have a splitter
- * thread and metascanner threads that continuously scan the meta table for regions.
-     * CatalogJanitor from master will run frequently to clean things up
-     */
-    TEST_UTIL.getConfiguration().setLong("hbase.catalogjanitor.interval", 500);
-    setUp();
-
-    final long runtime = 30 * 1000; //30 sec
-    LOG.info("Starting testConcurrentMetaScannerAndCatalogJanitor");
-    final TableName TABLENAME =
-        TableName.valueOf("testConcurrentMetaScannerAndCatalogJanitor");
-    final byte[] FAMILY = Bytes.toBytes("family");
-    TEST_UTIL.createTable(TABLENAME, FAMILY);
-
-    class RegionMetaSplitter extends StoppableImplementation implements Runnable {
-      Random random = new Random();
-      Throwable ex = null;
-      @Override
-      public void run() {
-        while (!isStopped()) {
-          try {
-            List<HRegionInfo> regions = MetaScanner.listAllRegions(TEST_UTIL.getConfiguration(),
-                connection, false);
-
-            //select a random region
-            HRegionInfo parent = regions.get(random.nextInt(regions.size()));
-            if (parent == null || !TABLENAME.equals(parent.getTable())) {
-              continue;
-            }
-
-            long startKey = 0, endKey = Long.MAX_VALUE;
-            byte[] start = parent.getStartKey();
-            byte[] end = parent.getEndKey();
-            if (!Bytes.equals(HConstants.EMPTY_START_ROW, parent.getStartKey())) {
-              startKey = Bytes.toLong(parent.getStartKey());
-            }
-            if (!Bytes.equals(HConstants.EMPTY_END_ROW, parent.getEndKey())) {
-              endKey = Bytes.toLong(parent.getEndKey());
-            }
-            if (startKey == endKey) {
-              continue;
-            }
-
-            long midKey = BigDecimal.valueOf(startKey).add(BigDecimal.valueOf(endKey))
-                .divideToIntegralValue(BigDecimal.valueOf(2)).longValue();
-
-            HRegionInfo splita = new HRegionInfo(TABLENAME,
-              start,
-              Bytes.toBytes(midKey));
-            HRegionInfo splitb = new HRegionInfo(TABLENAME,
-              Bytes.toBytes(midKey),
-              end);
-
-            MetaTableAccessor.splitRegion(connection,
-              parent, splita, splitb, ServerName.valueOf("fooserver", 1, 0), 1);
-
-            Threads.sleep(random.nextInt(200));
-          } catch (Throwable e) {
-            ex = e;
-            Assert.fail(StringUtils.stringifyException(e));
-          }
-        }
-      }
-      void rethrowExceptionIfAny() throws Throwable {
-        if (ex != null) { throw ex; }
-      }
-    }
-
-    class MetaScannerVerifier extends StoppableImplementation implements Runnable {
-      Random random = new Random();
-      Throwable ex = null;
-      @Override
-      public void run() {
-         while(!isStopped()) {
-           try {
-            NavigableMap<HRegionInfo, ServerName> regions =
-                MetaScanner.allTableRegions(connection, TABLENAME);
-
-            LOG.info("-------");
-            byte[] lastEndKey = HConstants.EMPTY_START_ROW;
-            for (HRegionInfo hri: regions.navigableKeySet()) {
-              long startKey = 0, endKey = Long.MAX_VALUE;
-              if (!Bytes.equals(HConstants.EMPTY_START_ROW, hri.getStartKey())) {
-                startKey = Bytes.toLong(hri.getStartKey());
-              }
-              if (!Bytes.equals(HConstants.EMPTY_END_ROW, hri.getEndKey())) {
-                endKey = Bytes.toLong(hri.getEndKey());
-              }
-              LOG.info("start:" + startKey + " end:" + endKey + " hri:" + hri);
-              Assert.assertTrue("lastEndKey=" + Bytes.toString(lastEndKey) + ", startKey="
+
-                Bytes.toString(hri.getStartKey()), Bytes.equals(lastEndKey, hri.getStartKey()));
-              lastEndKey = hri.getEndKey();
-            }
-            Assert.assertTrue(Bytes.equals(lastEndKey, HConstants.EMPTY_END_ROW));
-            LOG.info("-------");
-            Threads.sleep(10 + random.nextInt(50));
-          } catch (Throwable e) {
-            ex = e;
-            Assert.fail(StringUtils.stringifyException(e));
-          }
-         }
-      }
-      void rethrowExceptionIfAny() throws Throwable {
-        if (ex != null) { throw ex; }
-      }
-    }
-
-    RegionMetaSplitter regionMetaSplitter = new RegionMetaSplitter();
-    MetaScannerVerifier metaScannerVerifier = new MetaScannerVerifier();
-
-    Thread regionMetaSplitterThread = new Thread(regionMetaSplitter);
-    Thread metaScannerVerifierThread = new Thread(metaScannerVerifier);
-
-    regionMetaSplitterThread.start();
-    metaScannerVerifierThread.start();
-
-    Threads.sleep(runtime);
-
-    regionMetaSplitter.stop("test finished");
-    metaScannerVerifier.stop("test finished");
-
-    regionMetaSplitterThread.join();
-    metaScannerVerifierThread.join();
-
-    regionMetaSplitter.rethrowExceptionIfAny();
-    metaScannerVerifier.rethrowExceptionIfAny();
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/948746ce/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
index 935d462..4e3baad 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
@@ -41,14 +41,12 @@ import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;

http://git-wip-us.apache.org/repos/asf/hbase/blob/948746ce/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement.java
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement.java
index 25dd13e..4034291 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement.java
@@ -42,17 +42,14 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.MetaScanner;
-import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.master.balancer.FavoredNodeAssignmentHelper;
 import org.apache.hadoop.hbase.master.balancer.FavoredNodeLoadBalancer;
@@ -470,11 +467,11 @@ public class TestRegionPlacement {
     final AtomicInteger regionOnPrimaryNum = new AtomicInteger(0);
     final AtomicInteger totalRegionNum = new AtomicInteger(0);
     LOG.info("The start of region placement verification");
-    MetaScannerVisitor visitor = new MetaScannerVisitor() {
-      public boolean processRow(Result result) throws IOException {
+    MetaTableAccessor.Visitor visitor = new MetaTableAccessor.Visitor() {
+      public boolean visit(Result result) throws IOException {
         try {
           @SuppressWarnings("deprecation")
-          HRegionInfo info = MetaScanner.getHRegionInfo(result);
+          HRegionInfo info = MetaTableAccessor.getHRegionInfo(result);
           if(info.getTable().getNamespaceAsString()
               .equals(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR)) {
             return true;
@@ -522,11 +519,8 @@ public class TestRegionPlacement {
           throw e;
         }
       }
-
-      @Override
-      public void close() throws IOException {}
     };
-    MetaScanner.metaScan(CONNECTION, visitor);
+    MetaTableAccessor.fullScanRegions(CONNECTION, visitor);
     LOG.info("There are " + regionOnPrimaryNum.intValue() + " out of " +
         totalRegionNum.intValue() + " regions running on the primary" +
         " region servers" );

http://git-wip-us.apache.org/repos/asf/hbase/blob/948746ce/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java
index b51f7c7..692b5a0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java
@@ -27,15 +27,14 @@ import java.util.Map;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableExistsException;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.MetaScanner;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -75,8 +74,7 @@ public class TestRestartCluster {
       UTIL.waitTableEnabled(TABLE);
     }
 
-    List<HRegionInfo> allRegions =
-        MetaScanner.listAllRegions(UTIL.getConfiguration(), UTIL.getConnection(), true);
+    List<HRegionInfo> allRegions = MetaTableAccessor.getAllRegions(UTIL.getConnection(),
false);
     assertEquals(4, allRegions.size());
 
     LOG.info("\n\nShutting down cluster");
@@ -91,8 +89,7 @@ public class TestRestartCluster {
     // Need to use a new 'Configuration' so we make a new HConnection.
     // Otherwise we're reusing an HConnection that has gone stale because
     // the shutdown of the cluster also called shut of the connection.
-    allRegions = MetaScanner
-        .listAllRegions(new Configuration(UTIL.getConfiguration()), UTIL.getConnection(),
true);
+    allRegions = MetaTableAccessor.getAllRegions(UTIL.getConnection(), false);
     assertEquals(4, allRegions.size());
     LOG.info("\n\nWaiting for tables to be available");
     for(TableName TABLE: TABLES) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/948746ce/hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestEnableTableHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestEnableTableHandler.java
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestEnableTableHandler.java
index 6a60fc0..2db658b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestEnableTableHandler.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestEnableTableHandler.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.master.handler;
 
 import java.util.ArrayList;
 import java.util.List;
+import java.io.IOException;
 
 import com.google.common.base.Predicate;
 import com.google.common.collect.Iterables;
@@ -27,15 +28,20 @@ import com.google.common.collect.Iterators;
 import com.google.common.collect.Lists;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -49,13 +55,6 @@ import org.junit.experimental.categories.Category;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
-import java.io.IOException;
-import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.client.Table;
-
 @Category({ MasterTests.class, MediumTests.class })
 public class TestEnableTableHandler {
   private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
@@ -146,7 +145,8 @@ public class TestEnableTableHandler {
     admin.createTable(desc, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);
     // Now I have a nice table, mangle it by removing the HConstants.REGIONINFO_QUALIFIER_STR
     // content from a few of the rows.
-    Scan metaScannerForMyTable = MetaTableAccessor.getScanForTableName(tableName);
+    Scan metaScannerForMyTable =
+        MetaTableAccessor.getScanForTableName(TEST_UTIL.getConnection(), tableName);
     try (Table metaTable = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME))
{
       try (ResultScanner scanner = metaTable.getScanner(metaScannerForMyTable)) {
         for (Result result : scanner) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/948746ce/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java
index f29601c..a7025c6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.ChoreService;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.NotServingRegionException;
 import org.apache.hadoop.hbase.ScheduledChore;
 import org.apache.hadoop.hbase.ServerName;
@@ -50,7 +51,6 @@ import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.client.HConnectionManager;
 import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.MetaScanner;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
@@ -209,11 +209,11 @@ public class TestEndToEndSplitTransaction {
     stopper.stop(null);
 
     if (regionChecker.ex != null) {
-      throw regionChecker.ex;
+      throw new AssertionError("regionChecker", regionChecker.ex);
     }
 
     if (regionSplitter.ex != null) {
-      throw regionSplitter.ex;
+      throw new AssertionError("regionSplitter", regionSplitter.ex);
     }
 
     //one final check
@@ -243,15 +243,15 @@ public class TestEndToEndSplitTransaction {
       try {
         Random random = new Random();
         for (int i= 0; i< 5; i++) {
-          NavigableMap<HRegionInfo, ServerName> regions =
-              MetaScanner.allTableRegions(connection, tableName);
+          List<HRegionInfo> regions =
+              MetaTableAccessor.getTableRegions(connection, tableName, true);
           if (regions.size() == 0) {
             continue;
           }
           int regionIndex = random.nextInt(regions.size());
 
           //pick a random region and split it into two
-          HRegionInfo region = Iterators.get(regions.keySet().iterator(), regionIndex);
+          HRegionInfo region = Iterators.get(regions.iterator(), regionIndex);
 
           //pick the mid split point
           int start = 0, end = Integer.MAX_VALUE;
@@ -298,7 +298,7 @@ public class TestEndToEndSplitTransaction {
   }
 
   /**
-   * Checks regions using MetaScanner, MetaTableAccessor and HTable methods
+   * Checks regions using MetaTableAccessor and HTable methods
    */
   static class RegionChecker extends ScheduledChore {
     Connection connection;
@@ -315,15 +315,13 @@ public class TestEndToEndSplitTransaction {
     }
 
     /** verify region boundaries obtained from MetaScanner */
-    void verifyRegionsUsingMetaScanner() throws Exception {
+    void verifyRegionsUsingMetaTableAccessor() throws Exception {
 
-      //MetaScanner.allTableRegions()
-      NavigableMap<HRegionInfo, ServerName> regions = MetaScanner.allTableRegions(connection,
+      NavigableMap<HRegionInfo, ServerName> regions = MetaTableAccessor.allTableRegions(connection,
           tableName);
       verifyTableRegions(regions.keySet());
 
-      //MetaScanner.listAllRegions()
-      List<HRegionInfo> regionList = MetaScanner.listAllRegions(conf, connection, false);
+      List<HRegionInfo> regionList = MetaTableAccessor.getAllRegions(connection, true);
       verifyTableRegions(Sets.newTreeSet(regionList));
     }
 
@@ -345,7 +343,7 @@ public class TestEndToEndSplitTransaction {
     }
 
     void verify() throws Exception {
-      verifyRegionsUsingMetaScanner();
+      verifyRegionsUsingMetaTableAccessor();
       verifyRegionsUsingHTable();
     }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/948746ce/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
index d635ce0..2b16dce 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
@@ -78,7 +78,6 @@ import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.MetaScanner;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.Result;
@@ -1350,8 +1349,8 @@ public class TestHBaseFsck {
       setupTableWithRegionReplica(table, 2);
       assertEquals(ROWKEYS.length, countRows());
       NavigableMap<HRegionInfo, ServerName> map =
-          MetaScanner.allTableRegions(TEST_UTIL.getConnection(),
-          tbl.getName());
+          MetaTableAccessor.allTableRegions(TEST_UTIL.getConnection(),
+              tbl.getName());
       int i = 0;
       // store the HRIs of the regions we will mess up
       for (Map.Entry<HRegionInfo, ServerName> m : map.entrySet()) {
@@ -1383,7 +1382,7 @@ public class TestHBaseFsck {
       i = 0;
       HRegionInfo[] newHris = new HRegionInfo[2];
       // get all table's regions from meta
-      map = MetaScanner.allTableRegions(TEST_UTIL.getConnection(), tbl.getName());
+      map = MetaTableAccessor.allTableRegions(TEST_UTIL.getConnection(), tbl.getName());
       // get the HRIs of the new regions (hbck created new regions for fixing the hdfs mess-up)
       for (Map.Entry<HRegionInfo, ServerName> m : map.entrySet()) {
         if (m.getKey().getStartKey().length > 0 &&
@@ -2306,10 +2305,10 @@ public class TestHBaseFsck {
       // Mess it up by removing the RegionInfo for one region.
       final List<Delete> deletes = new LinkedList<Delete>();
       Table meta = connection.getTable(TableName.META_TABLE_NAME, hbfsckExecutorService);
-      MetaScanner.metaScan(connection, new MetaScanner.MetaScannerVisitor() {
+      MetaTableAccessor.fullScanRegions(connection, new MetaTableAccessor.Visitor() {
 
         @Override
-        public boolean processRow(Result rowResult) throws IOException {
+        public boolean visit(Result rowResult) throws IOException {
           HRegionInfo hri = MetaTableAccessor.getHRegionInfo(rowResult);
           if (hri != null && !hri.getTable().isSystemTable()) {
             Delete delete = new Delete(rowResult.getRow());
@@ -2318,10 +2317,6 @@ public class TestHBaseFsck {
           }
           return true;
         }
-
-        @Override
-        public void close() throws IOException {
-        }
       });
       meta.delete(deletes);
 
@@ -2652,11 +2647,14 @@ public class TestHBaseFsck {
     HBaseFsck hbck = doFsck(conf, false);
     assertNoErrors(hbck); // no errors
     try {
+      hbck.connect(); // need connection to have access to META
       hbck.checkRegionBoundaries();
     } catch (IllegalArgumentException e) {
       if (e.getMessage().endsWith("not a valid DFS filename.")) {
         fail("Table directory path is not valid." + e.getMessage());
       }
+    } finally {
+      hbck.close();
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/948746ce/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/HbckTestingUtil.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/HbckTestingUtil.java
b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/HbckTestingUtil.java
index 217f60b..bce8938 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/HbckTestingUtil.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/HbckTestingUtil.java
@@ -49,24 +49,27 @@ public class HbckTestingUtil {
       boolean fixReferenceFiles, boolean fixEmptyMetaRegionInfo, boolean fixTableLocks,
       TableName table) throws Exception {
     HBaseFsck fsck = new HBaseFsck(conf, exec);
-    fsck.connect();
-    fsck.setDisplayFullReport(); // i.e. -details
-    fsck.setTimeLag(0);
-    fsck.setFixAssignments(fixAssignments);
-    fsck.setFixMeta(fixMeta);
-    fsck.setFixHdfsHoles(fixHdfsHoles);
-    fsck.setFixHdfsOverlaps(fixHdfsOverlaps);
-    fsck.setFixHdfsOrphans(fixHdfsOrphans);
-    fsck.setFixTableOrphans(fixTableOrphans);
-    fsck.setFixVersionFile(fixVersionFile);
-    fsck.setFixReferenceFiles(fixReferenceFiles);
-    fsck.setFixEmptyMetaCells(fixEmptyMetaRegionInfo);
-    fsck.setFixTableLocks(fixTableLocks);
-    if (table != null) {
-      fsck.includeTable(table);
+    try {
+      fsck.connect();
+      fsck.setDisplayFullReport(); // i.e. -details
+      fsck.setTimeLag(0);
+      fsck.setFixAssignments(fixAssignments);
+      fsck.setFixMeta(fixMeta);
+      fsck.setFixHdfsHoles(fixHdfsHoles);
+      fsck.setFixHdfsOverlaps(fixHdfsOverlaps);
+      fsck.setFixHdfsOrphans(fixHdfsOrphans);
+      fsck.setFixTableOrphans(fixTableOrphans);
+      fsck.setFixVersionFile(fixVersionFile);
+      fsck.setFixReferenceFiles(fixReferenceFiles);
+      fsck.setFixEmptyMetaCells(fixEmptyMetaRegionInfo);
+      fsck.setFixTableLocks(fixTableLocks);
+      if (table != null) {
+        fsck.includeTable(table);
+      }
+      fsck.onlineHbck();
+    } finally {
+      fsck.close();
     }
-    fsck.onlineHbck();
-    fsck.close();
     return fsck;
   }
 


Mime
View raw message