hbase-commits mailing list archives

From te...@apache.org
Subject hbase git commit: HBASE-16039 Incremental backup action failed with NPE when table in full backup is deleted in between (Vladimir)
Date Thu, 16 Jun 2016 17:41:34 GMT
Repository: hbase
Updated Branches:
  refs/heads/HBASE-7912 3f8cb39b1 -> 1e9beddfc


HBASE-16039 Incremental backup action failed with NPE when table in full backup is deleted in between (Vladimir)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1e9beddf
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1e9beddf
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1e9beddf

Branch: refs/heads/HBASE-7912
Commit: 1e9beddfc9eb6929d97fababab1e8c7e8cfd1334
Parents: 3f8cb39
Author: tedyu <yuzhihong@gmail.com>
Authored: Thu Jun 16 10:41:08 2016 -0700
Committer: tedyu <yuzhihong@gmail.com>
Committed: Thu Jun 16 10:41:08 2016 -0700

----------------------------------------------------------------------
 .../hbase/backup/util/BackupServerUtil.java     |   4 +
 .../org/apache/hadoop/hbase/master/HMaster.java |  11 ++
 .../TestIncrementalBackupDeleteTable.java       | 139 +++++++++++++++++++
 3 files changed, 154 insertions(+)
----------------------------------------------------------------------
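
The fix has two halves: BackupServerUtil skips tables that no longer exist while copying table metadata during a full backup, and HMaster prunes deleted tables from an incremental backup request instead of failing it (a full backup of a missing table still fails fast). A minimal sketch of that policy, assuming an open Admin handle, a mutable tableList, and the requested BackupType in a variable named type; the variable names and surrounding method are illustrative, not the committed code:

    // Sketch only: Admin.tableExists(TableName), BackupType and
    // DoNotRetryIOException are real HBase/Hadoop types; the rest is illustrative.
    List<TableName> nonExisting = new ArrayList<>();
    for (TableName table : tableList) {
      if (!admin.tableExists(table)) {      // dropped since the full backup was taken
        nonExisting.add(table);
      }
    }
    if (!nonExisting.isEmpty()) {
      if (type == BackupType.INCREMENTAL) {
        LOG.warn("Skipping deleted tables: " + nonExisting);
        tableList.removeAll(nonExisting);   // prune and continue
      } else {
        // A full backup of a missing table cannot succeed, so fail fast.
        throw new DoNotRetryIOException("Non-existing tables found: " + nonExisting);
      }
    }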


http://git-wip-us.apache.org/repos/asf/hbase/blob/1e9beddf/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/BackupServerUtil.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/BackupServerUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/BackupServerUtil.java
index e008836..265ef6c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/BackupServerUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/BackupServerUtil.java
@@ -168,6 +168,10 @@ public final class BackupServerUtil {
 
       for (TableName table : backupContext.getTables()) {
 
+        if (!admin.tableExists(table)) {
+          LOG.warn("Table " + table + " does not exist, skipping it.");
+          continue;
+        }
         LOG.debug("Attempting to copy table info for:" + table);
         TableDescriptor orig = FSTableDescriptors.getTableDescriptorFromFs(fs, rootDir, table);
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/1e9beddf/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 30da779..0a0fdf4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -2659,6 +2659,8 @@ public class HMaster extends HRegionServer implements MasterServices {
         if (type == BackupType.INCREMENTAL ) {
           LOG.warn("Incremental backup table set contains non-exising table: "
               + nonExistingTableList);
+          // Update incremental backup set 
+          tableList = excludeNonExistingTables(tableList, nonExistingTableList);
         } else {
           // Throw exception only in full mode - we try to backup non-existing table
           throw new DoNotRetryIOException("Non-existing tables found in the table list: "
@@ -2678,6 +2680,15 @@ public class HMaster extends HRegionServer implements MasterServices {
     return new Pair<>(procId, backupId);
   }
 
+  private List<TableName> excludeNonExistingTables(List<TableName> tableList,
+      List<TableName> nonExistingTableList) {
+    
+    for (TableName table : nonExistingTableList) {
+      tableList.remove(table);
+    }
+    return tableList;
+  }
+
   /**
    * Returns the list of table descriptors that match the specified request
    * @param namespace the namespace to query, or null if querying for all
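
For reference, the new excludeNonExistingTables helper mutates and returns the list it was given; the loop is equivalent to a single call on the standard java.util.List API:

    // Equivalent to excludeNonExistingTables(tableList, nonExistingTableList):
    tableList.removeAll(nonExistingTableList);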

http://git-wip-us.apache.org/repos/asf/hbase/blob/1e9beddf/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupDeleteTable.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupDeleteTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupDeleteTable.java
new file mode 100644
index 0000000..ad42d48
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupDeleteTable.java
@@ -0,0 +1,139 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertTrue;
+
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.BackupAdmin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.hamcrest.CoreMatchers;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import com.google.common.collect.Lists;
+
+/**
+ * Test scenario:
+ *  1. Create tables t1 and t2
+ *  2. Load data into t1 and t2
+ *  3. Full backup of t1 and t2
+ *  4. Delete t2
+ *  5. Load more data into t1
+ *  6. Incremental backup of t1
+ */
+@Category(LargeTests.class)
+public class TestIncrementalBackupDeleteTable extends TestBackupBase {
+  private static final Log LOG = LogFactory.getLog(TestIncrementalBackupDeleteTable.class);
+  // Implement all steps in one test, since incremental backup/restore depends on the full backup
+  @Test
+  public void testIncBackupDeleteTable() throws Exception {
+    // #1 - create full backup for all tables
+    LOG.info("create full backup image for all tables");
+
+    List<TableName> tables = Lists.newArrayList(table1, table2);
+    // Open a connection and an admin handle for the backup calls below
+    Connection conn = ConnectionFactory.createConnection(conf1);
+    HBaseAdmin admin = (HBaseAdmin) conn.getAdmin();
+
+    BackupRequest request = new BackupRequest();
+    request.setBackupType(BackupType.FULL).setTableList(tables).setTargetRootDir(BACKUP_ROOT_DIR);
+    String backupIdFull = admin.getBackupAdmin().backupTables(request);
+
+    assertTrue(checkSucceeded(backupIdFull));
+
+    // #2 - insert some data into table1
+    HTable t1 = (HTable) conn.getTable(table1);
+    Put p1;
+    for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
+      p1 = new Put(Bytes.toBytes("row-t1" + i));
+      p1.addColumn(famName, qualName, Bytes.toBytes("val" + i));
+      t1.put(p1);
+    }
+
+    Assert.assertThat(TEST_UTIL.countRows(t1), CoreMatchers.equalTo(NB_ROWS_IN_BATCH * 2));
+    t1.close();
+
+    // Delete table table2
+    admin.disableTable(table2);
+    admin.deleteTable(table2);
+    
+    // #3 - incremental backup for table1
+    tables = Lists.newArrayList(table1);
+    request = new BackupRequest();
+    request.setBackupType(BackupType.INCREMENTAL).setTableList(tables)
+        .setTargetRootDir(BACKUP_ROOT_DIR);
+    String backupIdIncMultiple = admin.getBackupAdmin().backupTables(request);
+    assertTrue(checkSucceeded(backupIdIncMultiple));
+
+    // #4 - restore full backup for all tables, without overwrite
+    TableName[] tablesRestoreFull =
+        new TableName[] { table1, table2 };
+
+    TableName[] tablesMapFull =
+        new TableName[] { table1_restore, table2_restore };
+
+    BackupAdmin client = getBackupAdmin();
+    client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupIdFull, false, false,
+      tablesRestoreFull,
+      tablesMapFull, false));
+
+    // #5.1 - check tables for full restore
+    HBaseAdmin hAdmin = TEST_UTIL.getHBaseAdmin();
+    assertTrue(hAdmin.tableExists(table1_restore));
+    assertTrue(hAdmin.tableExists(table2_restore));
+
+
+    // #5.2 - checking row count of tables for full restore
+    HTable hTable = (HTable) conn.getTable(table1_restore);
+    Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(NB_ROWS_IN_BATCH));
+    hTable.close();
+
+    hTable = (HTable) conn.getTable(table2_restore);
+    Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(NB_ROWS_IN_BATCH));
+    hTable.close();
+
+
+    // #6 - restore incremental backup for table1
+    TableName[] tablesRestoreIncMultiple =
+        new TableName[] { table1 };
+    TableName[] tablesMapIncMultiple =
+        new TableName[] { table1_restore };
+    client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple, false, false,
+      tablesRestoreIncMultiple, tablesMapIncMultiple, true));
+
+    hTable = (HTable) conn.getTable(table1_restore);
+    Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(NB_ROWS_IN_BATCH * 2));
+    hTable.close();
+    admin.close();
+    conn.close();
+  }
+
+}
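
One test-hygiene note: the Connection, admin, and table handles above are closed manually, so an assertion failure mid-test would leak them. A sketch of the same setup with try-with-resources (Connection, Admin, and Table are all Closeable in this API; a variant, not what the commit does):

    try (Connection conn = ConnectionFactory.createConnection(conf1);
         HBaseAdmin admin = (HBaseAdmin) conn.getAdmin();
         HTable t1 = (HTable) conn.getTable(table1)) {
      // ... full backup, load, delete table2, incremental backup, restore, asserts ...
    } // handles close even if an assertion above fails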

