hbase-commits mailing list archives

From: te...@apache.org
Subject: hbase git commit: HBASE-16602 Avoid creating Connection in BackupLogCleaner#getDeletableFiles()
Date: Mon, 12 Sep 2016 19:57:03 GMT
Repository: hbase
Updated Branches:
  refs/heads/HBASE-7912 da8840325 -> 44812cf1e


HBASE-16602 Avoid creating Connection in BackupLogCleaner#getDeletableFiles()


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/44812cf1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/44812cf1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/44812cf1

Branch: refs/heads/HBASE-7912
Commit: 44812cf1ed6255649bd0d67b1cfe46940f11fc1a
Parents: da88403
Author: tedyu <yuzhihong@gmail.com>
Authored: Mon Sep 12 12:56:52 2016 -0700
Committer: tedyu <yuzhihong@gmail.com>
Committed: Mon Sep 12 12:56:52 2016 -0700

----------------------------------------------------------------------
 .../hbase/backup/master/BackupLogCleaner.java   |  30 +++-
 .../hbase/backup/TestBackupLogCleaner.java      | 159 ------------------
 .../backup/master/TestBackupLogCleaner.java     | 163 +++++++++++++++++++
 3 files changed, 188 insertions(+), 164 deletions(-)
----------------------------------------------------------------------
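
In outline: BackupLogCleaner now implements init(Map<String, Object>) and obtains its
Connection once, up front. When the cleaner chore supplies the master under the
HMaster.MASTER key, the cleaner caches master.getConnection() and, if no Configuration
was set yet, derives one from that connection; otherwise it falls back to
ConnectionFactory.createConnection(getConf()). getDeletableFiles() then reuses the
cached connection instead of opening and closing a fresh one on every pass, which the
removed TODO comment had flagged as expensive. The test is also moved from
org.apache.hadoop.hbase.backup to org.apache.hadoop.hbase.backup.master to match the
class under test, and gains a cleaner.init(null) call that exercises the fallback path.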


http://git-wip-us.apache.org/repos/asf/hbase/blob/44812cf1/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java
index 1f1fe32..aa767b1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.backup.master;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Map;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -34,8 +35,12 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.cleaner.BaseLogCleanerDelegate;
 
+import com.google.common.annotations.VisibleForTesting;
+
 /**
  * Implementation of a log cleaner that checks if a log is still scheduled for
  * incremental backup before deleting it when its TTL is over.
@@ -46,11 +51,30 @@ public class BackupLogCleaner extends BaseLogCleanerDelegate {
   private static final Log LOG = LogFactory.getLog(BackupLogCleaner.class);
 
   private boolean stopped = false;
+  private Connection conn;
 
   public BackupLogCleaner() {
   }
 
   @Override
+  public void init(Map<String, Object> params) {
+    if (params != null && params.containsKey(HMaster.MASTER)) {
+      MasterServices master = (MasterServices) params.get(HMaster.MASTER);
+      conn = master.getConnection();
+      if (getConf() == null) {
+        super.setConf(conn.getConfiguration());
+      }
+    }
+    if (conn == null) {
+      try {
+        conn = ConnectionFactory.createConnection(getConf());
+      } catch (IOException ioe) {
+        throw new RuntimeException("Failed to create connection", ioe);
+      }
+    }
+  }
+
+  @Override
   public Iterable<FileStatus> getDeletableFiles(Iterable<FileStatus> files) {
     // all members of this class are null if backup is disabled,
     // so we cannot filter the files
@@ -59,11 +83,7 @@ public class BackupLogCleaner extends BaseLogCleanerDelegate {
     }
     
     List<FileStatus> list = new ArrayList<FileStatus>();
-    // TODO: LogCleaners do not have a way to get the Connection from Master. We should find a
-    // way to pass it down here, so that this connection is not re-created every time.
-    // It is expensive
-    try (final Connection conn = ConnectionFactory.createConnection(getConf());
-        final BackupSystemTable table = new BackupSystemTable(conn)) {
+    try (final BackupSystemTable table = new BackupSystemTable(conn)) {
       // If we do not have recorded backup sessions
       try {
         if (!table.hasBackupSessions()) {
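
For context, here is a minimal sketch (not part of this commit) of how a master-side
caller could wire the delegate up so that init() reuses the shared connection; the
BackupLogCleanerWiring class and the masterServices/conf parameters are hypothetical
stand-ins for objects the real cleaner chore already holds:

  import java.util.HashMap;
  import java.util.Map;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.backup.master.BackupLogCleaner;
  import org.apache.hadoop.hbase.master.HMaster;
  import org.apache.hadoop.hbase.master.MasterServices;

  public class BackupLogCleanerWiring {
    public static BackupLogCleaner newCleaner(MasterServices masterServices,
        Configuration conf) {
      Map<String, Object> params = new HashMap<String, Object>();
      // HMaster.MASTER is the key that init() looks up in the hunk above
      params.put(HMaster.MASTER, masterServices);
      BackupLogCleaner cleaner = new BackupLogCleaner();
      cleaner.setConf(conf);  // optional here: init() derives it from the connection if unset
      cleaner.init(params);   // caches masterServices.getConnection()
      return cleaner;
    }
  }

Passing init(null) instead, as the relocated test does, takes the fallback branch and
creates a connection from getConf() via ConnectionFactory.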

http://git-wip-us.apache.org/repos/asf/hbase/blob/44812cf1/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupLogCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupLogCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupLogCleaner.java
deleted file mode 100644
index 3ef68e6..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupLogCleaner.java
+++ /dev/null
@@ -1,159 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.backup;
-
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.LocatedFileStatus;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.RemoteIterator;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
-import org.apache.hadoop.hbase.backup.master.BackupLogCleaner;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.testclassification.LargeTests;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.wal.DefaultWALProvider;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import com.google.common.collect.Iterables;
-import com.google.common.collect.Lists;
-
-@Category(LargeTests.class)
-public class TestBackupLogCleaner extends TestBackupBase {
-  private static final Log LOG = LogFactory.getLog(TestBackupLogCleaner.class);
-
-  // implements all test cases in 1 test since incremental full backup/
-  // incremental backup has dependencies
-  @Test
-  public void testBackupLogCleaner() throws Exception {
-
-    // #1 - create full backup for all tables
-    LOG.info("create full backup image for all tables");
-
-    List<TableName> tableSetFullList = Lists.newArrayList(table1, table2, table3, table4);
-
-    try (BackupSystemTable systemTable = new BackupSystemTable(TEST_UTIL.getConnection())) {
-      // Verify that we have no backup sessions yet
-      assertFalse(systemTable.hasBackupSessions());
-
-      List<FileStatus> walFiles = getListOfWALFiles(TEST_UTIL.getConfiguration());
-      List<String> swalFiles = convert(walFiles);
-      BackupLogCleaner cleaner = new BackupLogCleaner();
-      cleaner.setConf(TEST_UTIL.getConfiguration());
-
-      Iterable<FileStatus> deletable = cleaner.getDeletableFiles(walFiles);
-      int size = Iterables.size(deletable);
-
-      // We can delete all files because we do not have yet recorded backup sessions
-      assertTrue(size == walFiles.size());
-
-      systemTable.addWALFiles(swalFiles, "backup", "root");
-      String backupIdFull = fullTableBackup(tableSetFullList);
-      assertTrue(checkSucceeded(backupIdFull));
-      // Check one more time
-      deletable = cleaner.getDeletableFiles(walFiles);
-      // We can delete wal files because they were saved into hbase:backup table
-      size = Iterables.size(deletable);
-      assertTrue(size == walFiles.size());
-
-      List<FileStatus> newWalFiles = getListOfWALFiles(TEST_UTIL.getConfiguration());
-      LOG.debug("WAL list after full backup");
-      convert(newWalFiles);
-
-      // New list of wal files is greater than the previous one,
-      // because new wal per RS have been opened after full backup
-      assertTrue(walFiles.size() < newWalFiles.size());
-      Connection conn = ConnectionFactory.createConnection(conf1);
-      // #2 - insert some data to table
-      HTable t1 = (HTable) conn.getTable(table1);
-      Put p1;
-      for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
-        p1 = new Put(Bytes.toBytes("row-t1" + i));
-        p1.addColumn(famName, qualName, Bytes.toBytes("val" + i));
-        t1.put(p1);
-      }
-
-      t1.close();
-
-      HTable t2 = (HTable) conn.getTable(table2);
-      Put p2;
-      for (int i = 0; i < 5; i++) {
-        p2 = new Put(Bytes.toBytes("row-t2" + i));
-        p2.addColumn(famName, qualName, Bytes.toBytes("val" + i));
-        t2.put(p2);
-      }
-
-      t2.close();
-
-      // #3 - incremental backup for multiple tables
-
-      List<TableName> tableSetIncList = Lists.newArrayList(table1, table2, table3);
-      String backupIdIncMultiple = backupTables(BackupType.INCREMENTAL, tableSetIncList,
-        BACKUP_ROOT_DIR);
-      assertTrue(checkSucceeded(backupIdIncMultiple));
-      deletable = cleaner.getDeletableFiles(newWalFiles);
-
-      assertTrue(Iterables.size(deletable) == newWalFiles.size());
-
-      conn.close();
-    }
-  }
-
-  private List<String> convert(List<FileStatus> walFiles) {
-    List<String> result = new ArrayList<String>();
-    for (FileStatus fs : walFiles) {
-      LOG.debug("+++WAL: " + fs.getPath().toString());
-      result.add(fs.getPath().toString());
-    }
-    return result;
-  }
-
-  private List<FileStatus> getListOfWALFiles(Configuration c) throws IOException {
-    Path logRoot = new Path(FSUtils.getRootDir(c), HConstants.HREGION_LOGDIR_NAME);
-    FileSystem fs = FileSystem.get(c);
-    RemoteIterator<LocatedFileStatus> it = fs.listFiles(logRoot, true);
-    List<FileStatus> logFiles = new ArrayList<FileStatus>();
-    while (it.hasNext()) {
-      LocatedFileStatus lfs = it.next();
-      if (lfs.isFile() && !DefaultWALProvider.isMetaFile(lfs.getPath())) {
-        logFiles.add(lfs);
-        LOG.info(lfs);
-      }
-    }
-    return logFiles;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/44812cf1/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/master/TestBackupLogCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/master/TestBackupLogCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/master/TestBackupLogCleaner.java
new file mode 100644
index 0000000..c74edb0
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/master/TestBackupLogCleaner.java
@@ -0,0 +1,163 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup.master;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocatedFileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.BackupType;
+import org.apache.hadoop.hbase.backup.TestBackupBase;
+import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
+import org.apache.hadoop.hbase.backup.master.BackupLogCleaner;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.wal.DefaultWALProvider;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Lists;
+
+@Category(LargeTests.class)
+public class TestBackupLogCleaner extends TestBackupBase {
+  private static final Log LOG = LogFactory.getLog(TestBackupLogCleaner.class);
+
+  // implements all test cases in 1 test since incremental full backup/
+  // incremental backup has dependencies
+  @Test
+  public void testBackupLogCleaner() throws Exception {
+
+    // #1 - create full backup for all tables
+    LOG.info("create full backup image for all tables");
+
+    List<TableName> tableSetFullList = Lists.newArrayList(table1, table2, table3, table4);
+
+    try (BackupSystemTable systemTable = new BackupSystemTable(TEST_UTIL.getConnection())) {
+      // Verify that we have no backup sessions yet
+      assertFalse(systemTable.hasBackupSessions());
+
+      List<FileStatus> walFiles = getListOfWALFiles(TEST_UTIL.getConfiguration());
+      List<String> swalFiles = convert(walFiles);
+      BackupLogCleaner cleaner = new BackupLogCleaner();
+      cleaner.setConf(TEST_UTIL.getConfiguration());
+      cleaner.init(null);
+      cleaner.setConf(TEST_UTIL.getConfiguration());
+
+      Iterable<FileStatus> deletable = cleaner.getDeletableFiles(walFiles);
+      int size = Iterables.size(deletable);
+
+      // We can delete all files because we do not have yet recorded backup sessions
+      assertTrue(size == walFiles.size());
+
+      systemTable.addWALFiles(swalFiles, "backup", "root");
+      String backupIdFull = fullTableBackup(tableSetFullList);
+      assertTrue(checkSucceeded(backupIdFull));
+      // Check one more time
+      deletable = cleaner.getDeletableFiles(walFiles);
+      // We can delete wal files because they were saved into hbase:backup table
+      size = Iterables.size(deletable);
+      assertTrue(size == walFiles.size());
+
+      List<FileStatus> newWalFiles = getListOfWALFiles(TEST_UTIL.getConfiguration());
+      LOG.debug("WAL list after full backup");
+      convert(newWalFiles);
+
+      // New list of wal files is greater than the previous one,
+      // because new wal per RS have been opened after full backup
+      assertTrue(walFiles.size() < newWalFiles.size());
+      Connection conn = ConnectionFactory.createConnection(conf1);
+      // #2 - insert some data to table
+      HTable t1 = (HTable) conn.getTable(table1);
+      Put p1;
+      for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
+        p1 = new Put(Bytes.toBytes("row-t1" + i));
+        p1.addColumn(famName, qualName, Bytes.toBytes("val" + i));
+        t1.put(p1);
+      }
+
+      t1.close();
+
+      HTable t2 = (HTable) conn.getTable(table2);
+      Put p2;
+      for (int i = 0; i < 5; i++) {
+        p2 = new Put(Bytes.toBytes("row-t2" + i));
+        p2.addColumn(famName, qualName, Bytes.toBytes("val" + i));
+        t2.put(p2);
+      }
+
+      t2.close();
+
+      // #3 - incremental backup for multiple tables
+
+      List<TableName> tableSetIncList = Lists.newArrayList(table1, table2, table3);
+      String backupIdIncMultiple = backupTables(BackupType.INCREMENTAL, tableSetIncList,
+        BACKUP_ROOT_DIR);
+      assertTrue(checkSucceeded(backupIdIncMultiple));
+      deletable = cleaner.getDeletableFiles(newWalFiles);
+
+      assertTrue(Iterables.size(deletable) == newWalFiles.size());
+
+      conn.close();
+    }
+  }
+
+  private List<String> convert(List<FileStatus> walFiles) {
+    List<String> result = new ArrayList<String>();
+    for (FileStatus fs : walFiles) {
+      LOG.debug("+++WAL: " + fs.getPath().toString());
+      result.add(fs.getPath().toString());
+    }
+    return result;
+  }
+
+  private List<FileStatus> getListOfWALFiles(Configuration c) throws IOException {
+    Path logRoot = new Path(FSUtils.getRootDir(c), HConstants.HREGION_LOGDIR_NAME);
+    FileSystem fs = FileSystem.get(c);
+    RemoteIterator<LocatedFileStatus> it = fs.listFiles(logRoot, true);
+    List<FileStatus> logFiles = new ArrayList<FileStatus>();
+    while (it.hasNext()) {
+      LocatedFileStatus lfs = it.next();
+      if (lfs.isFile() && !DefaultWALProvider.isMetaFile(lfs.getPath())) {
+        logFiles.add(lfs);
+        LOG.info(lfs);
+      }
+    }
+    return logFiles;
+  }
+
+}

