hbase-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From te...@apache.org
Subject [1/6] hbase git commit: HBASE-14140 HBase Backup/Restore Phase 3: Enhance HBaseAdmin API to include backup/restore - related API (Vladimir)
Date Wed, 25 May 2016 20:18:47 GMT
Repository: hbase
Updated Branches:
  refs/heads/HBASE-7912 27c7cf6bf -> 82735499a


http://git-wip-us.apache.org/repos/asf/hbase/blob/82735499/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupAdmin.java
b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupAdmin.java
new file mode 100644
index 0000000..3630d87
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupAdmin.java
@@ -0,0 +1,186 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertTrue;
+
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.BackupAdmin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.hamcrest.CoreMatchers;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import com.google.common.collect.Lists;
+
+@Category(LargeTests.class)
+public class TestBackupAdmin extends TestBackupBase {
+  private static final Log LOG = LogFactory.getLog(TestBackupAdmin.class);
+  //implement all test cases in 1 test since incremental backup/restore has dependencies
+  @Test
+  public void TestIncBackupRestoreWithAdminAPI() throws Exception {
+    // #1 - create full backup for all tables
+    LOG.info("create full backup image for all tables");
+
+    List<TableName> tables = Lists.newArrayList(table1, table2, table3, table4);
+    HBaseAdmin admin = null;
+    BackupAdmin backupAdmin = null;
+    Connection conn = ConnectionFactory.createConnection(conf1);
+    admin = (HBaseAdmin) conn.getAdmin();
+    backupAdmin =  admin.getBackupAdmin();
+    BackupRequest request = new BackupRequest();
+    request.setBackupType(BackupType.FULL).setTableList(tables).setTargetRootDir(BACKUP_ROOT_DIR);
+    String backupIdFull = backupAdmin.backupTables(request);
+
+    assertTrue(checkSucceeded(backupIdFull));
+
+    // #2 - insert some data to table
+    HTable t1 = (HTable) conn.getTable(table1);
+    Put p1;
+    for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
+      p1 = new Put(Bytes.toBytes("row-t1" + i));
+      p1.addColumn(famName, qualName, Bytes.toBytes("val" + i));
+      t1.put(p1);
+    }
+
+    Assert.assertThat(TEST_UTIL.countRows(t1), CoreMatchers.equalTo(NB_ROWS_IN_BATCH * 2));
+    t1.close();
+
+    HTable t2 =  (HTable) conn.getTable(table2);
+    Put p2;
+    for (int i = 0; i < 5; i++) {
+      p2 = new Put(Bytes.toBytes("row-t2" + i));
+      p2.addColumn(famName, qualName, Bytes.toBytes("val" + i));
+      t2.put(p2);
+    }
+
+    Assert.assertThat(TEST_UTIL.countRows(t2), CoreMatchers.equalTo(NB_ROWS_IN_BATCH + 5));
+    t2.close();
+
+    // #3 - incremental backup for multiple tables
+    tables = Lists.newArrayList(table1, table2, table3);
+    request = new BackupRequest();
+    request.setBackupType(BackupType.INCREMENTAL).setTableList(tables)
+    .setTargetRootDir(BACKUP_ROOT_DIR);
+    String backupIdIncMultiple = backupAdmin.backupTables(request);
+    assertTrue(checkSucceeded(backupIdIncMultiple));
+
+    // #4 - restore full backup for all tables, without overwrite
+    TableName[] tablesRestoreFull =
+        new TableName[] { table1, table2, table3, table4 };
+
+    TableName[] tablesMapFull =
+        new TableName[] { table1_restore, table2_restore, table3_restore, table4_restore
};
+
+    RestoreRequest restoreRequest = new RestoreRequest();
+    restoreRequest.setBackupRootDir(BACKUP_ROOT_DIR).setBackupId(backupIdFull).
+      setCheck(false).setAutorestore(false).setOverwrite(false).
+      setFromTables(tablesRestoreFull).setToTables(tablesMapFull);
+    
+    backupAdmin.restore(restoreRequest);
+    
+    // #5.1 - check tables for full restore
+    
+    assertTrue(admin.tableExists(table1_restore));
+    assertTrue(admin.tableExists(table2_restore));
+    assertTrue(admin.tableExists(table3_restore));
+    assertTrue(admin.tableExists(table4_restore));
+
+
+    // #5.2 - checking row count of tables for full restore
+    HTable hTable = (HTable) conn.getTable(table1_restore);
+    Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(NB_ROWS_IN_BATCH));
+    hTable.close();
+
+    hTable = (HTable) conn.getTable(table2_restore);
+    Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(NB_ROWS_IN_BATCH));
+    hTable.close();
+
+    hTable = (HTable) conn.getTable(table3_restore);
+    Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(0));
+    hTable.close();
+
+    hTable = (HTable) conn.getTable(table4_restore);
+    Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(0));
+    hTable.close();
+
+    // #6 - restore incremental backup for multiple tables, with overwrite
+    TableName[] tablesRestoreIncMultiple =
+        new TableName[] { table1, table2, table3 };
+    TableName[] tablesMapIncMultiple =
+        new TableName[] { table1_restore, table2_restore, table3_restore };
+    
+    restoreRequest = new RestoreRequest();
+    restoreRequest.setBackupRootDir(BACKUP_ROOT_DIR).setBackupId(backupIdIncMultiple).
+      setCheck(false).setAutorestore(false).setOverwrite(true).
+      setFromTables(tablesRestoreIncMultiple).setToTables(tablesMapIncMultiple);
+    
+    backupAdmin.restore(restoreRequest);
+    
+    hTable = (HTable) conn.getTable(table1_restore);
+    Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(NB_ROWS_IN_BATCH
* 2));
+    hTable.close();
+
+    hTable = (HTable) conn.getTable(table2_restore);
+    Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(NB_ROWS_IN_BATCH
+ 5));
+    hTable.close();
+
+    hTable = (HTable) conn.getTable(table3_restore);
+    Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(0));
+    hTable.close();
+
+    // #7 - incremental backup for single, empty table
+
+    tables = toList(table4.getNameAsString());
+    request = new BackupRequest();
+    request.setBackupType(BackupType.INCREMENTAL).setTableList(tables)
+    .setTargetRootDir(BACKUP_ROOT_DIR);
+    String backupIdIncEmpty = admin.getBackupAdmin().backupTables(request);
+
+
+    // #8 - restore incremental backup for single empty table, with overwrite
+    TableName[] tablesRestoreIncEmpty = new TableName[] { table4 };
+    TableName[] tablesMapIncEmpty = new TableName[] { table4_restore };
+    
+    restoreRequest = new RestoreRequest();
+    restoreRequest.setBackupRootDir(BACKUP_ROOT_DIR).setBackupId(backupIdIncEmpty).
+      setCheck(false).setAutorestore(false).setOverwrite(true).
+      setFromTables(tablesRestoreIncEmpty).setToTables(tablesMapIncEmpty);
+    
+    backupAdmin.restore(restoreRequest);   
+
+    hTable = (HTable) conn.getTable(table4_restore);
+    Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(0));
+    hTable.close();
+    admin.close();
+    conn.close();
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/82735499/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
index 8358e47..3678df2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.BackupInfo.BackupState;
 import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.BackupAdmin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
@@ -116,7 +117,7 @@ public class TestBackupBase {
     createTables();
   }
   
-  static void waitForSystemTable() throws Exception
+  public static void waitForSystemTable() throws Exception
   {
     try(Admin admin = TEST_UTIL.getAdmin();) {
       while (!admin.tableExists(BackupSystemTable.getTableName()) 
@@ -144,14 +145,19 @@ public class TestBackupBase {
       throws IOException {
     Connection conn = null;
     HBaseAdmin admin = null;
+    BackupAdmin badmin = null;
     String backupId;
     try {
       conn = ConnectionFactory.createConnection(conf1);
       admin = (HBaseAdmin) conn.getAdmin();
       BackupRequest request = new BackupRequest();
       request.setBackupType(type).setTableList(tables).setTargetRootDir(path);
-      backupId = admin.backupTables(request);
+      badmin = admin.getBackupAdmin();
+      backupId = badmin.backupTables(request);
     } finally {
+      if(badmin != null){
+        badmin.close();
+      }
       if (admin != null) {
         admin.close();
       }
@@ -229,15 +235,33 @@ public class TestBackupBase {
     }
   }
 
-  protected BackupClient getBackupClient(){
-    return BackupRestoreFactory.getBackupClient(conf1);
-  }
+//  protected BackupClient getBackupClient(){
+//    return BackupRestoreClientFactory.getBackupClient(conf1);
+//  }
   
-  protected RestoreClient getRestoreClient()
-  {
-    return BackupRestoreFactory.getRestoreClient(conf1);
-  }
+//  protected RestoreClient getRestoreClient()
+//  {
+//    return BackupRestoreClientFactory.getRestoreClient(conf1);
+//  }
 
+  protected BackupAdmin getBackupAdmin() throws IOException {
+    return TEST_UTIL.getAdmin().getBackupAdmin();
+  }
+  
+  /**
+   * Get restore request.
+   *  
+   */
+  public  RestoreRequest createRestoreRequest(
+      String backupRootDir,
+      String backupId, boolean check, boolean autoRestore, TableName[] fromTables,
+      TableName[] toTables, boolean isOverwrite) {
+    RestoreRequest request = new RestoreRequest();
+    request.setBackupRootDir(backupRootDir).setBackupId(backupId).setCheck(check).setAutorestore(autoRestore).
+    setFromTables(fromTables).setToTables(toTables).setOverwrite(isOverwrite);
+    return request;
+}
+  
   /**
    * Helper method
    */

http://git-wip-us.apache.org/repos/asf/hbase/blob/82735499/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDelete.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDelete.java
b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDelete.java
index eeb89b5..192d8e8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDelete.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDelete.java
@@ -51,16 +51,10 @@ public class TestBackupDelete extends TestBackupBase {
     String backupId = fullTableBackup(tableList);
     assertTrue(checkSucceeded(backupId));
     LOG.info("backup complete");
-    ByteArrayOutputStream baos = new ByteArrayOutputStream();
-    System.setOut(new PrintStream(baos));
-
     String[] backupIds = new String[] { backupId };
-    getBackupClient().deleteBackups(backupIds);
-
+    int deleted = getBackupAdmin().deleteBackups(backupIds);
+    assertTrue(1 == deleted);
     LOG.info("delete_backup");
-    String output = baos.toString();
-    LOG.info(baos.toString());
-    assertTrue(output.indexOf("Delete backup for backupID=" + backupId + " completed.") >=
0);
   }
 
   /**
@@ -90,7 +84,7 @@ public class TestBackupDelete extends TestBackupBase {
     LOG.info("delete_backup");
     String output = baos.toString();
     LOG.info(baos.toString());
-    assertTrue(output.indexOf("Delete backup for backupID=" + backupId + " completed.") >=
0);
+    assertTrue(output.indexOf("Deleted 1 backups") >= 0);
   }  
   
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/82735499/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDescribe.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDescribe.java
b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDescribe.java
index 4f7cb11..4a35a06 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDescribe.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDescribe.java
@@ -58,7 +58,7 @@ public class TestBackupDescribe extends TestBackupBase {
     assertTrue(checkSucceeded(backupId));
 
 
-    BackupInfo info = getBackupClient().getBackupInfo(backupId);
+    BackupInfo info = getBackupAdmin().getBackupInfo(backupId);
     assertTrue(info.getState() == BackupState.COMPLETE);
 
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/82735499/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupShowHistory.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupShowHistory.java
b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupShowHistory.java
index 716a22a..a7d2750 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupShowHistory.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupShowHistory.java
@@ -54,7 +54,7 @@ public class TestBackupShowHistory extends TestBackupBase {
     assertTrue(checkSucceeded(backupId));
     LOG.info("backup complete");
 
-    List<BackupInfo> history = getBackupClient().getHistory(10);
+    List<BackupInfo> history = getBackupAdmin().getHistory(10);
     assertTrue(history.size() > 0);
     boolean success = false;
     for(BackupInfo info: history){

http://git-wip-us.apache.org/repos/asf/hbase/blob/82735499/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupStatusProgress.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupStatusProgress.java
b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupStatusProgress.java
index ce04b0b..be4019b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupStatusProgress.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupStatusProgress.java
@@ -55,9 +55,9 @@ public class TestBackupStatusProgress extends TestBackupBase {
     assertTrue(checkSucceeded(backupId));
 
 
-    BackupInfo info = getBackupClient().getBackupInfo(backupId);    
+    BackupInfo info = getBackupAdmin().getBackupInfo(backupId);    
     assertTrue(info.getState() == BackupState.COMPLETE);
-    int p = getBackupClient().getProgress(backupId);
+    int p = getBackupAdmin().getProgress(backupId);
     LOG.debug(info.getShortDescription());
     assertTrue(p > 0);
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/82735499/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java
b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java
index 7ea4338..dfda854 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java
@@ -341,10 +341,10 @@ public class TestBackupSystemTable {
 
     table.addWALFiles(files, "backup", "root");
 
-    assertTrue(table.checkWALFile(files.get(0)));
-    assertTrue(table.checkWALFile(files.get(1)));
-    assertTrue(table.checkWALFile(files.get(2)));
-    assertFalse(table.checkWALFile(newFile));
+    assertTrue(table.isWALFileDeletable(files.get(0)));
+    assertTrue(table.isWALFileDeletable(files.get(1)));
+    assertTrue(table.isWALFileDeletable(files.get(2)));
+    assertFalse(table.isWALFileDeletable(newFile));
 
     cleanBackupTable();
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/82735499/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackup.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackup.java
b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackup.java
index 0c0bf4a..3d37d07 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackup.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackup.java
@@ -1,14 +1,20 @@
 /**
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information
regarding
- * copyright ownership. The ASF licenses this file to you under the Apache License, Version
2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain
a
- * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable
- * law or agreed to in writing, software distributed under the License is distributed on
an "AS IS"
- * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License
- * for the specific language governing permissions and limitations under the License.
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
  */
-
 package org.apache.hadoop.hbase.backup;
 
 import static org.junit.Assert.assertTrue;

http://git-wip-us.apache.org/repos/asf/hbase/blob/82735499/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java
b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java
index 983b850..1e23842 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java
@@ -20,6 +20,7 @@ import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.BackupAdmin;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.util.ToolRunner;
@@ -50,8 +51,8 @@ public class TestFullRestore extends TestBackupBase {
 
     TableName[] tableset = new TableName[] { table1 };
     TableName[] tablemap = new TableName[] { table1_restore };
-    RestoreClient client = getRestoreClient();
-    client.restore(BACKUP_ROOT_DIR, backupId, false, false, tableset, tablemap, false);
+    BackupAdmin client = getBackupAdmin();
+    client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, false, tableset,
tablemap, false));
     HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
     assertTrue(hba.tableExists(table1_restore));
     TEST_UTIL.deleteTable(table1_restore);
@@ -69,7 +70,7 @@ public class TestFullRestore extends TestBackupBase {
     LOG.info("backup complete");
     assertTrue(checkSucceeded(backupId));
     //restore <backup_root_path> <backup_id> <tables> [tableMapping]
-    String[] args = new String[]{"restore",  BACKUP_ROOT_DIR, backupId, 
+    String[] args = new String[]{BACKUP_ROOT_DIR, backupId, 
         table1.getNameAsString(), table1_restore.getNameAsString() }; 
     // Run backup
     int ret = ToolRunner.run(conf1, new RestoreDriver(), args);
@@ -94,9 +95,9 @@ public class TestFullRestore extends TestBackupBase {
 
     TableName[] restore_tableset = new TableName[] { table2, table3 };
     TableName[] tablemap = new TableName[] { table2_restore, table3_restore };
-    RestoreClient client = getRestoreClient();
-    client.restore(BACKUP_ROOT_DIR, backupId, false, false,
-      restore_tableset, tablemap, false);
+    BackupAdmin client = getBackupAdmin();
+    client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, false,
+      restore_tableset, tablemap, false));
     HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
     assertTrue(hba.tableExists(table2_restore));
     assertTrue(hba.tableExists(table3_restore));
@@ -121,7 +122,7 @@ public class TestFullRestore extends TestBackupBase {
     
     
     //restore <backup_root_path> <backup_id> <tables> [tableMapping]
-    String[] args = new String[]{"restore",  BACKUP_ROOT_DIR, backupId, 
+    String[] args = new String[]{BACKUP_ROOT_DIR, backupId, 
         StringUtils.join(restore_tableset, ","), 
         StringUtils.join(tablemap, ",") }; 
     // Run backup
@@ -152,9 +153,9 @@ public class TestFullRestore extends TestBackupBase {
     LOG.info("backup complete");
 
     TableName[] tableset = new TableName[] { table1 };
-    RestoreClient client = getRestoreClient();
-    client.restore(BACKUP_ROOT_DIR, backupId, false, false, tableset, null,
-      true);
+    BackupAdmin client = getBackupAdmin();
+    client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, false, tableset,
null,
+      true));
   }
 
   /**
@@ -171,7 +172,7 @@ public class TestFullRestore extends TestBackupBase {
     LOG.info("backup complete");
     TableName[] tableset = new TableName[] { table1 };
     //restore <backup_root_path> <backup_id> <tables> [tableMapping]
-    String[] args = new String[]{"restore",  BACKUP_ROOT_DIR, backupId, 
+    String[] args = new String[]{BACKUP_ROOT_DIR, backupId, 
         StringUtils.join(tableset, ","), "-overwrite" }; 
     // Run restore
     int ret = ToolRunner.run(conf1, new RestoreDriver(), args);
@@ -196,9 +197,9 @@ public class TestFullRestore extends TestBackupBase {
     assertTrue(checkSucceeded(backupId));    
 
     TableName[] restore_tableset = new TableName[] { table2, table3 };
-    RestoreClient client = getRestoreClient();
-    client.restore(BACKUP_ROOT_DIR, backupId, false,
-      false, restore_tableset, null, true);
+    BackupAdmin client = getBackupAdmin();
+    client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupId, false,
+      false, restore_tableset, null, true));
   }
 
   /**
@@ -215,7 +216,7 @@ public class TestFullRestore extends TestBackupBase {
 
     TableName[] restore_tableset = new TableName[] { table2, table3 };
     //restore <backup_root_path> <backup_id> <tables> [tableMapping]
-    String[] args = new String[]{"restore",  BACKUP_ROOT_DIR, backupId, 
+    String[] args = new String[]{BACKUP_ROOT_DIR, backupId, 
         StringUtils.join(restore_tableset, ","), "-overwrite" }; 
     // Run backup
     int ret = ToolRunner.run(conf1, new RestoreDriver(), args);
@@ -243,9 +244,9 @@ public class TestFullRestore extends TestBackupBase {
 
     TableName[] tableset = new TableName[] { TableName.valueOf("faketable") };
     TableName[] tablemap = new TableName[] { table1_restore };
-    RestoreClient client = getRestoreClient();
-    client.restore(BACKUP_ROOT_DIR, backupId, false, false, tableset, tablemap,
-      false);
+    BackupAdmin client = getBackupAdmin();
+    client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, false, tableset,
tablemap,
+      false));
   }
 
   
@@ -265,7 +266,7 @@ public class TestFullRestore extends TestBackupBase {
 
     TableName[] tableset = new TableName[] { TableName.valueOf("faketable") };
     TableName[] tablemap = new TableName[] { table1_restore };
-    String[] args = new String[]{"restore",  BACKUP_ROOT_DIR, backupId, 
+    String[] args = new String[]{BACKUP_ROOT_DIR, backupId, 
         StringUtils.join(tableset, ","), 
         StringUtils.join(tablemap, ",") }; 
     // Run restore
@@ -289,9 +290,9 @@ public class TestFullRestore extends TestBackupBase {
     TableName[] restore_tableset
       = new TableName[] { TableName.valueOf("faketable1"), TableName.valueOf("faketable2")
};
     TableName[] tablemap = new TableName[] { table2_restore, table3_restore };
-    RestoreClient client = getRestoreClient();
-    client.restore(BACKUP_ROOT_DIR, backupId, false,
-      false, restore_tableset, tablemap, false);
+    BackupAdmin client = getBackupAdmin();
+    client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupId, false,
+      false, restore_tableset, tablemap, false));
   }
   
   /**
@@ -310,7 +311,7 @@ public class TestFullRestore extends TestBackupBase {
     TableName[] restore_tableset
       = new TableName[] { TableName.valueOf("faketable1"), TableName.valueOf("faketable2")
};
     TableName[] tablemap = new TableName[] { table2_restore, table3_restore };
-    String[] args = new String[]{"restore",  BACKUP_ROOT_DIR, backupId, 
+    String[] args = new String[]{BACKUP_ROOT_DIR, backupId, 
         StringUtils.join(restore_tableset, ","), 
         StringUtils.join(tablemap, ",") }; 
     // Run restore

http://git-wip-us.apache.org/repos/asf/hbase/blob/82735499/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
index 04ee015..988b156 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
@@ -24,11 +24,8 @@ import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.LocatedFileStatus;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.BackupAdmin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
@@ -59,7 +56,7 @@ public class TestIncrementalBackup extends TestBackupBase {
 
     BackupRequest request = new BackupRequest();
     request.setBackupType(BackupType.FULL).setTableList(tables).setTargetRootDir(BACKUP_ROOT_DIR);
-    String backupIdFull = admin.backupTables(request);
+    String backupIdFull = admin.getBackupAdmin().backupTables(request);
 
     assertTrue(checkSucceeded(backupIdFull));
 
@@ -91,7 +88,7 @@ public class TestIncrementalBackup extends TestBackupBase {
     request = new BackupRequest();
     request.setBackupType(BackupType.INCREMENTAL).setTableList(tables)
     .setTargetRootDir(BACKUP_ROOT_DIR);
-    String backupIdIncMultiple = admin.backupTables(request);
+    String backupIdIncMultiple = admin.getBackupAdmin().backupTables(request);
     assertTrue(checkSucceeded(backupIdIncMultiple));
 
     // #4 - restore full backup for all tables, without overwrite
@@ -101,10 +98,10 @@ public class TestIncrementalBackup extends TestBackupBase {
     TableName[] tablesMapFull =
         new TableName[] { table1_restore, table2_restore, table3_restore, table4_restore
};
 
-    RestoreClient client = getRestoreClient();
-    client.restore(BACKUP_ROOT_DIR, backupIdFull, false, false,
+    BackupAdmin client = getBackupAdmin();
+    client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupIdFull, false, false,
       tablesRestoreFull,
-      tablesMapFull, false);
+      tablesMapFull, false));
 
     // #5.1 - check tables for full restore
     HBaseAdmin hAdmin = TEST_UTIL.getHBaseAdmin();
@@ -137,9 +134,8 @@ public class TestIncrementalBackup extends TestBackupBase {
         new TableName[] { table1, table2, table3 };
     TableName[] tablesMapIncMultiple =
         new TableName[] { table1_restore, table2_restore, table3_restore };
-    client = getRestoreClient();
-    client.restore(BACKUP_ROOT_DIR, backupIdIncMultiple, false, false,
-      tablesRestoreIncMultiple, tablesMapIncMultiple, true);
+    client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple, false, false,
+      tablesRestoreIncMultiple, tablesMapIncMultiple, true));
 
     hTable = (HTable) conn.getTable(table1_restore);
     Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(NB_ROWS_IN_BATCH
* 2));
@@ -159,16 +155,16 @@ public class TestIncrementalBackup extends TestBackupBase {
     request = new BackupRequest();
     request.setBackupType(BackupType.INCREMENTAL).setTableList(tables)
     .setTargetRootDir(BACKUP_ROOT_DIR);
-    String backupIdIncEmpty = admin.backupTables(request);
+    String backupIdIncEmpty = admin.getBackupAdmin().backupTables(request);
 
 
     // #8 - restore incremental backup for single empty table, with overwrite
     TableName[] tablesRestoreIncEmpty = new TableName[] { table4 };
     TableName[] tablesMapIncEmpty = new TableName[] { table4_restore };
 
-    getRestoreClient().restore(BACKUP_ROOT_DIR, backupIdIncEmpty, false, false,
+    client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncEmpty, false, false,
       tablesRestoreIncEmpty,
-      tablesMapIncEmpty, true);
+      tablesMapIncEmpty, true));
 
     hTable = (HTable) conn.getTable(table4_restore);
     Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(0));

http://git-wip-us.apache.org/repos/asf/hbase/blob/82735499/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupNoDataLoss.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupNoDataLoss.java
b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupNoDataLoss.java
index b3cf4ee..c67f63f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupNoDataLoss.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupNoDataLoss.java
@@ -25,6 +25,7 @@ import java.util.List;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.BackupAdmin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.HTable;
@@ -97,9 +98,9 @@ public class TestIncrementalBackupNoDataLoss extends TestBackupBase {
       TEST_UTIL.deleteTable(table2_restore);
     }
 
-    RestoreClient client = getRestoreClient();
-    client.restore(BACKUP_ROOT_DIR, backupIdInc1, false, true, tablesRestoreInc1,
-      tablesMapInc1, false);
+    BackupAdmin client = getBackupAdmin();
+    client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupIdInc1, false, true, tablesRestoreInc1,
+      tablesMapInc1, false));
 
     HTable hTable = (HTable) conn.getTable(table1_restore);
     Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(NB_ROWS_IN_BATCH
* 2));
@@ -110,9 +111,8 @@ public class TestIncrementalBackupNoDataLoss extends TestBackupBase {
     TableName[] tablesRestoreInc2 = new TableName[] { table2 };
     TableName[] tablesMapInc2 = new TableName[] { table2_restore };
 
-    client = getRestoreClient();
-    client.restore(BACKUP_ROOT_DIR, backupIdInc2, false, true, tablesRestoreInc2,
-      tablesMapInc2, false);
+    client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupIdInc2, false, true, tablesRestoreInc2,
+      tablesMapInc2, false));
 
     hTable = (HTable) conn.getTable(table2_restore);
     Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(NB_ROWS_IN_BATCH
+ 5));

http://git-wip-us.apache.org/repos/asf/hbase/blob/82735499/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java
b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java
index 32a028c..f40ed9d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java
@@ -39,8 +39,8 @@ public class TestRemoteRestore extends TestBackupBase {
     LOG.info("backup complete");
     TableName[] tableset = new TableName[] { table1 };
     TableName[] tablemap = new TableName[] { table1_restore };
-    getRestoreClient().restore(BACKUP_REMOTE_ROOT_DIR, backupId, false, false, tableset,
-      tablemap, false);
+    getBackupAdmin().restore(createRestoreRequest(BACKUP_REMOTE_ROOT_DIR, backupId, false,
false, tableset,
+      tablemap, false));
     HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
     assertTrue(hba.tableExists(table1_restore));
     TEST_UTIL.deleteTable(table1_restore);

http://git-wip-us.apache.org/repos/asf/hbase/blob/82735499/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java
b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java
index 652a909..c86b4b4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java
@@ -46,8 +46,8 @@ public class TestRestoreBoundaryTests extends TestBackupBase {
     LOG.info("backup complete");
     TableName[] tableset = new TableName[] { table1 };
     TableName[] tablemap = new TableName[] { table1_restore };
-    getRestoreClient().restore(BACKUP_ROOT_DIR, backupId, false, false, tableset, tablemap,
-      false);
+    getBackupAdmin().restore(createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, false,
tableset, tablemap,
+      false));
     HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
     assertTrue(hba.tableExists(table1_restore));
     TEST_UTIL.deleteTable(table1_restore);
@@ -65,9 +65,9 @@ public class TestRestoreBoundaryTests extends TestBackupBase {
     String backupId = fullTableBackup(tables);
     TableName[] restore_tableset = new TableName[] { table2, table3};
     TableName[] tablemap = new TableName[] { table2_restore, table3_restore };
-    getRestoreClient().restore(BACKUP_ROOT_DIR, backupId, false, false, restore_tableset,
+    getBackupAdmin().restore(createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, false,
restore_tableset,
       tablemap,
-      false);
+      false));
     HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
     assertTrue(hba.tableExists(table2_restore));
     assertTrue(hba.tableExists(table3_restore));

http://git-wip-us.apache.org/repos/asf/hbase/blob/82735499/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java
index 5e302d2..5922f21 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java
@@ -83,6 +83,9 @@ public class TestMetaWithReplicas {
     TEST_UTIL.getConfiguration().setInt(
         StorefileRefresherChore.REGIONSERVER_STOREFILE_REFRESH_PERIOD, 1000);
     TEST_UTIL.startMiniCluster(3);
+    
+    TEST_UTIL.waitUntilAllSystemRegionsAssigned();
+
     // disable the balancer
     LoadBalancerTracker l = new LoadBalancerTracker(TEST_UTIL.getZooKeeperWatcher(),
         new Abortable() {

http://git-wip-us.apache.org/repos/asf/hbase/blob/82735499/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java
index 7a4baf3..ce66ee4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java
@@ -77,6 +77,7 @@ public class TestMasterOperationsForRegionReplicas {
     while(ADMIN.getClusterStatus().getServers().size() < numSlaves) {
       Thread.sleep(100);
     }
+    TEST_UTIL.waitUntilAllSystemRegionsAssigned();
   }
 
   @AfterClass
@@ -305,7 +306,7 @@ public class TestMasterOperationsForRegionReplicas {
       connection);
     snapshot.initialize();
     Map<HRegionInfo, ServerName> regionToServerMap = snapshot.getRegionToRegionServerMap();
-    assert(regionToServerMap.size() == numRegions * numReplica + 1); //'1' for the namespace
+    assert(regionToServerMap.size() == numRegions * numReplica + 2); //'2' for the ns, backup
     Map<ServerName, List<HRegionInfo>> serverToRegionMap = snapshot.getRegionServerToRegionMap();
     for (Map.Entry<ServerName, List<HRegionInfo>> entry : serverToRegionMap.entrySet())
{
       if (entry.getKey().equals(util.getHBaseCluster().getMaster().getServerName())) {
@@ -332,14 +333,14 @@ public class TestMasterOperationsForRegionReplicas {
       connection);
     snapshot.initialize();
     Map<HRegionInfo, ServerName>  regionToServerMap = snapshot.getRegionToRegionServerMap();
-    assertEquals(regionToServerMap.size(), numRegions * numReplica + 1); //'1' for the namespace
+    assertEquals(regionToServerMap.size(), numRegions * numReplica + 2); //'2' for the ns,
backup
     Map<ServerName, List<HRegionInfo>> serverToRegionMap = snapshot.getRegionServerToRegionMap();
     assertEquals(serverToRegionMap.keySet().size(), 2); // 1 rs + 1 master
     for (Map.Entry<ServerName, List<HRegionInfo>> entry : serverToRegionMap.entrySet())
{
       if (entry.getKey().equals(TEST_UTIL.getHBaseCluster().getMaster().getServerName()))
{
         continue;
       }
-      assertEquals(entry.getValue().size(), numRegions * numReplica);
+      assertEquals(entry.getValue().size(), numRegions * numReplica +1);
     }
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/82735499/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java
b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java
index b1864d2..c198d55 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java
@@ -121,8 +121,8 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
     admin = connection.getAdmin();
     admin.setBalancerRunning(false, true);
 
-    TEST_UTIL.waitUntilAllRegionsAssigned(TableName.META_TABLE_NAME);
-    TEST_UTIL.waitUntilAllRegionsAssigned(TableName.NAMESPACE_TABLE_NAME);
+    TEST_UTIL.waitUntilAllSystemRegionsAssigned();
+    
   }
 
   @AfterClass

http://git-wip-us.apache.org/repos/asf/hbase/blob/82735499/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRebuildTestCore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRebuildTestCore.java
b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRebuildTestCore.java
index e767f3a..cab82ad 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRebuildTestCore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRebuildTestCore.java
@@ -104,7 +104,7 @@ public class OfflineMetaRebuildTestCore {
     tableIdx++;
     htbl = setupTable(table);
     populateTable(htbl);
-    assertEquals(5, scanMeta());
+    assertEquals(6, scanMeta());
     LOG.info("Table " + table + " has " + tableRowCount(conf, table)
         + " entries.");
     assertEquals(16, tableRowCount(conf, table));

http://git-wip-us.apache.org/repos/asf/hbase/blob/82735499/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildBase.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildBase.java
b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildBase.java
index 17a208f..4e03846 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildBase.java
@@ -51,7 +51,7 @@ public class TestOfflineMetaRebuildBase extends OfflineMetaRebuildTestCore
{
     wipeOutMeta();
 
     // is meta really messed up?
-    assertEquals(1, scanMeta());
+    assertEquals(2, scanMeta());
     assertErrors(doFsck(conf, false),
         new ERROR_CODE[] {
             ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
@@ -81,7 +81,7 @@ public class TestOfflineMetaRebuildBase extends OfflineMetaRebuildTestCore
{
       LOG.info("No more RIT in ZK, now doing final test verification");
 
       // everything is good again.
-      assertEquals(5, scanMeta()); // including table state rows
+      assertEquals(6, scanMeta()); // including table state rows
       TableName[] tableNames = TEST_UTIL.getHBaseAdmin().listTableNames();
       for (TableName tableName : tableNames) {
         HTableDescriptor tableDescriptor = TEST_UTIL.getHBaseAdmin().getTableDescriptor(tableName);

http://git-wip-us.apache.org/repos/asf/hbase/blob/82735499/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildHole.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildHole.java
b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildHole.java
index 615487d..28f3392 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildHole.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildHole.java
@@ -52,7 +52,7 @@ public class TestOfflineMetaRebuildHole extends OfflineMetaRebuildTestCore
{
     wipeOutMeta();
 
     // is meta really messed up?
-    assertEquals(1, scanMeta());
+    assertEquals(2, scanMeta());
     assertErrors(doFsck(conf, false), new ERROR_CODE[] {
         ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
         ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
@@ -86,7 +86,7 @@ public class TestOfflineMetaRebuildHole extends OfflineMetaRebuildTestCore
{
     }
 
     // Meta still messed up.
-    assertEquals(1, scanMeta());
+    assertEquals(2, scanMeta());
     HTableDescriptor[] htbls = getTables(TEST_UTIL.getConfiguration());
     LOG.info("Tables present after restart: " + Arrays.toString(htbls));
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/82735499/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildOverlap.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildOverlap.java
b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildOverlap.java
index 40ba86a..4558d07 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildOverlap.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildOverlap.java
@@ -55,7 +55,7 @@ public class TestOfflineMetaRebuildOverlap extends OfflineMetaRebuildTestCore
{
     wipeOutMeta();
 
     // is meta really messed up?
-    assertEquals(1, scanMeta());
+    assertEquals(2, scanMeta());
     assertErrors(doFsck(conf, false),
         new ERROR_CODE[] {
             ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
@@ -94,7 +94,7 @@ public class TestOfflineMetaRebuildOverlap extends OfflineMetaRebuildTestCore
{
     }
 
     // Meta still messed up.
-    assertEquals(1, scanMeta());
+    assertEquals(2, scanMeta());
     HTableDescriptor[] htbls = getTables(TEST_UTIL.getConfiguration());
     LOG.info("Tables present after restart: " + Arrays.toString(htbls));
 


Mime
View raw message