hbase-commits mailing list archives

From te...@apache.org
Subject [3/6] hbase git commit: HBASE-15411 Rewrite backup with Procedure V2 - phase 1
Date Fri, 01 Apr 2016 22:02:25 GMT
http://git-wip-us.apache.org/repos/asf/hbase/blob/b37cc760/hbase-protocol/src/main/protobuf/Backup.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/protobuf/Backup.proto b/hbase-protocol/src/main/protobuf/Backup.proto
index 383b990..c17ad06 100644
--- a/hbase-protocol/src/main/protobuf/Backup.proto
+++ b/hbase-protocol/src/main/protobuf/Backup.proto
@@ -27,6 +27,24 @@ option optimize_for = SPEED;
 
 import "HBase.proto";
 
+enum FullTableBackupState {
+  PRE_SNAPSHOT_TABLE = 1;
+  SNAPSHOT_TABLES = 2;
+  SNAPSHOT_COPY = 3;
+  BACKUP_COMPLETE = 4;
+}
+
+enum IncrementalTableBackupState {
+  PREPARE_INCREMENTAL = 1;
+  INCREMENTAL_COPY = 2;
+  INCR_BACKUP_COMPLETE = 3;
+}
+
+message SnapshotTableStateData {
+  required TableName table = 1;
+  required string snapshotName = 2;
+}
+
 enum BackupType {
   FULL = 0;
   INCREMENTAL = 1;
@@ -103,3 +121,9 @@ message BackupContext {
     STORE_MANIFEST = 5;
   } 
 }
+
+message BackupProcContext {
+  required BackupContext ctx = 1;
+  repeated ServerTimestamp server_timestamp = 2;
+}
+

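As a rough illustration (not part of this patch), the new full-backup procedure states can be listed from the generated protobuf enum; this assumes Backup.proto compiles into the usual BackupProtos outer class referenced elsewhere in the patch:

import org.apache.hadoop.hbase.protobuf.generated.BackupProtos;

public class FullBackupStateDump {
  public static void main(String[] args) {
    // Print the new full-backup procedure states in declaration order.
    for (BackupProtos.FullTableBackupState state
        : BackupProtos.FullTableBackupState.values()) {
      System.out.println(state.getNumber() + " -> " + state.name());
    }
  }
}
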
http://git-wip-us.apache.org/repos/asf/hbase/blob/b37cc760/hbase-protocol/src/main/protobuf/Master.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/protobuf/Master.proto b/hbase-protocol/src/main/protobuf/Master.proto
index 79bb862..6431c73 100644
--- a/hbase-protocol/src/main/protobuf/Master.proto
+++ b/hbase-protocol/src/main/protobuf/Master.proto
@@ -27,6 +27,7 @@ option java_generate_equals_and_hash = true;
 option optimize_for = SPEED;
 
 import "HBase.proto";
+import "Backup.proto";
 import "Client.proto";
 import "ClusterStatus.proto";
 import "ErrorHandling.proto";
@@ -485,7 +486,7 @@ message GetProcedureResultResponse {
 
 message AbortProcedureRequest {
   required uint64 proc_id = 1;
-  optional bool mayInterruptIfRunning = 2 [default = true];
+  optional bool may_interrupt_if_running = 2 [default = true];
 }
 
 message AbortProcedureResponse {
@@ -540,6 +541,19 @@ message SecurityCapabilitiesResponse {
   repeated Capability capabilities = 1;
 }
 
+message BackupTablesRequest {
+  required BackupType type = 1;
+  repeated TableName tables = 2;
+  required string target_root_dir = 3;
+  optional int64 workers = 4;
+  optional int64 bandwidth = 5;
+}
+
+message BackupTablesResponse {
+  optional uint64 proc_id = 1;
+  optional string backup_id = 2;
+}
+
 service MasterService {
   /** Used by the client to get the number of regions that have received the updated schema */
   rpc GetSchemaAlterStatus(GetSchemaAlterStatusRequest)
@@ -814,4 +828,8 @@ service MasterService {
   /** returns a list of procedures */
   rpc ListProcedures(ListProceduresRequest)
     returns(ListProceduresResponse);
+
+  /** backup table set */
+  rpc backupTables(BackupTablesRequest)
+    returns(BackupTablesResponse);
 }

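As a hedged sketch (not part of the patch), a BackupTablesRequest for the new backupTables RPC could be assembled with the generated builder; this assumes the usual MasterProtos/BackupProtos outer classes, and the directory value is a placeholder:

MasterProtos.BackupTablesRequest request =
    MasterProtos.BackupTablesRequest.newBuilder()
        .setType(BackupProtos.BackupType.FULL)    // required
        .setTargetRootDir("hdfs://backup/root")   // required; placeholder path
        .setBandwidth(100)                        // optional throttle; units not defined by the proto
        .build();
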
http://git-wip-us.apache.org/repos/asf/hbase/blob/b37cc760/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupClient.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupClient.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupClient.java
deleted file mode 100644
index 7c8ea39..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupClient.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.backup;
-
-import java.io.IOException;
-import java.util.List;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.TableName;
-
-public interface BackupClient {
-
-  public void setConf(Configuration conf);
-
-  /**
-   * Send backup request to server, and monitor the progress if necessary
-   * @param backupType : full or incremental
-   * @param targetRootDir : the root path specified by user
-   * @param tableList : the table list specified by user
-   * @return backupId backup id
-   * @throws IOException exception
-   */
- public String create(BackupType backupType, List<TableName> tableList,
-      String targetRootDir) throws IOException;
- }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b37cc760/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreFactory.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreFactory.java
index 6fbfe18..e0c6483 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreFactory.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreFactory.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.hbase.backup;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.backup.impl.BackupClientImpl;
 import org.apache.hadoop.hbase.backup.impl.BackupCopyService;
 import org.apache.hadoop.hbase.backup.impl.IncrementalRestoreService;
 import org.apache.hadoop.hbase.backup.impl.RestoreClientImpl;
@@ -34,7 +33,6 @@ public final class BackupRestoreFactory {
 
   public final static String HBASE_INCR_RESTORE_IMPL_CLASS = "hbase.incremental.restore.class";
   public final static String HBASE_BACKUP_COPY_IMPL_CLASS = "hbase.backup.copy.class";
-  public final static String HBASE_BACKUP_CLIENT_IMPL_CLASS = "hbase.backup.client.class";
   public final static String HBASE_RESTORE_CLIENT_IMPL_CLASS = "hbase.restore.client.class";
 
   private BackupRestoreFactory(){
@@ -66,20 +64,6 @@ public final class BackupRestoreFactory {
   }
   
   /**
-   * Gets backup client implementation
-   * @param conf - configuration
-   * @return backup client
-   */
-  public static BackupClient getBackupClient(Configuration conf) {
-    Class<? extends BackupClient> cls =
-        conf.getClass(HBASE_BACKUP_CLIENT_IMPL_CLASS, BackupClientImpl.class,
-          BackupClient.class);
-    BackupClient client = ReflectionUtils.newInstance(cls, conf);
-    client.setConf(conf);
-    return client;
-  }
-  
-  /**
    * Gets restore client implementation
    * @param conf - configuration
    * @return backup client

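The restore side stays pluggable through configuration; a minimal, hypothetical override of the remaining client hook (the implementation class name below is made up) would be:

Configuration conf = HBaseConfiguration.create();
// Hypothetical custom restore client; the class name is illustrative only.
conf.set(BackupRestoreFactory.HBASE_RESTORE_CLIENT_IMPL_CLASS,
    "org.example.backup.MyRestoreClient");
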
http://git-wip-us.apache.org/repos/asf/hbase/blob/b37cc760/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java
index 6e5a355..4e88125 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java
@@ -19,36 +19,19 @@
 
 package org.apache.hadoop.hbase.backup;
 
-import java.io.FileNotFoundException;
 import java.io.IOException;
-import java.util.ArrayList;
 import java.util.HashMap;
-import java.util.TreeMap;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.impl.BackupManifest;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
-import org.apache.hadoop.hbase.io.HFileLink;
-import org.apache.hadoop.hbase.io.hfile.CacheConfig;
-import org.apache.hadoop.hbase.io.hfile.HFile;
-import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
-import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
-import org.apache.hadoop.hbase.regionserver.HStore;
-import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
-import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
-import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
-import org.apache.hadoop.hbase.util.Bytes;
 
 /**
  * View to an on-disk Backup Image FileSytem
@@ -59,56 +42,10 @@ import org.apache.hadoop.hbase.util.Bytes;
 public class HBackupFileSystem {
   public static final Log LOG = LogFactory.getLog(HBackupFileSystem.class);
 
-  private final String RESTORE_TMP_PATH = "/tmp";
-  private final String[] ignoreDirs = { "recovered.edits" };
-
-  private final Configuration conf;
-  private final FileSystem fs;
-  private final Path backupRootPath;
-  private final Path restoreTmpPath;
-  private final String backupId;
-
-  /**
-   * Create a view to the on-disk Backup Image.
-   * @param conf  to use
-   * @param backupPath  to where the backup Image stored
-   * @param backupId represent backup Image
-   */
-  public HBackupFileSystem(final Configuration conf, final Path backupRootPath, final String backupId)
-      throws IOException {
-    this.conf = conf;
-    this.fs = backupRootPath.getFileSystem(conf);
-    this.backupRootPath = backupRootPath;
-    this.backupId = backupId; // the backup ID for the lead backup Image
-    this.restoreTmpPath = new Path(conf.get("hbase.fs.tmp.dir") != null?
-          conf.get("hbase.fs.tmp.dir"): RESTORE_TMP_PATH,
-        "restore");
-  }
-
-  public Path getBackupRootPath() {
-    return backupRootPath;
-  }
-
-  public String getBackupId() {
-    return backupId;
-  }
-
   /**
-   * @param tableName is the table backed up
-   * @return {@link HTableDescriptor} saved in backup image of the table
+   * This is utility class.
    */
-  public HTableDescriptor getTableDesc(TableName tableName)
-      throws FileNotFoundException, IOException {
-    Path tableInfoPath = this.getTableInfoPath(tableName);
-    SnapshotDescription desc = SnapshotDescriptionUtils.readSnapshotInfo(fs, tableInfoPath);
-    SnapshotManifest manifest = SnapshotManifest.open(conf, fs, tableInfoPath, desc);
-    HTableDescriptor tableDescriptor = manifest.getTableDescriptor();
-    if (!tableDescriptor.getNameAsString().equals(tableName)) {
-      LOG.error("couldn't find Table Desc for table: " + tableName + " under tableInfoPath: "
-          + tableInfoPath.toString());
-      LOG.error("tableDescriptor.getNameAsString() = " + tableDescriptor.getNameAsString());
-    }
-    return tableDescriptor;
+  private HBackupFileSystem() {
   }
 
   /**
@@ -141,69 +78,6 @@ public class HBackupFileSystem {
   }
 
   /**
-   * return value represent path for:
-   * ".../user/biadmin/backup1/default/t1_dn/backup_1396650096738/.hbase-snapshot"
-   * @param backupRootPath backup root path
-   * @param tableName table name
-   * @param backupId backup Id
-   * @return path for snapshot
-   */
-  public static Path getTableSnapshotPath(Path backupRootPath, TableName tableName,
-      String backupId) {
-    return new Path(getTableBackupPath(backupRootPath, tableName, backupId),
-      HConstants.SNAPSHOT_DIR_NAME);
-  }
-
-  /**
-   * return value represent path for:
-   * "..../default/t1_dn/backup_1396650096738/.hbase-snapshot/snapshot_1396650097621_default_t1_dn"
-   * this path contains .snapshotinfo, .tabledesc (0.96 and 0.98) this path contains .snapshotinfo,
-   * .data.manifest (trunk)
-   * @param tableName table name
-   * @return path to table info
-   * @throws FileNotFoundException exception
-   * @throws IOException exception
-   */
-  public Path getTableInfoPath(TableName tableName)
-      throws FileNotFoundException, IOException {
-    Path tableSnapShotPath = getTableSnapshotPath(backupRootPath, tableName, backupId);
-    Path tableInfoPath = null;
-
-    // can't build the path directly as the timestamp values are different
-    FileStatus[] snapshots = fs.listStatus(tableSnapShotPath);
-    for (FileStatus snapshot : snapshots) {
-      tableInfoPath = snapshot.getPath();
-      // SnapshotManifest.DATA_MANIFEST_NAME = "data.manifest";
-      if (tableInfoPath.getName().endsWith("data.manifest")) {
-        break;
-      }
-    }
-    return tableInfoPath;
-  }
-
-  /**
-   * return value represent path for:
-   * ".../user/biadmin/backup1/default/t1_dn/backup_1396650096738/archive/data/default/t1_dn"
-   * @param tabelName table name
-   * @return path to table archive
-   * @throws IOException exception
-   */
-  public Path getTableArchivePath(TableName tableName)
-      throws IOException {
-    Path baseDir = new Path(getTableBackupPath(backupRootPath, tableName, backupId),
-      HConstants.HFILE_ARCHIVE_DIRECTORY);
-    Path dataDir = new Path(baseDir, HConstants.BASE_NAMESPACE_DIR);
-    Path archivePath = new Path(dataDir, tableName.getNamespaceAsString());
-    Path tableArchivePath =
-        new Path(archivePath, tableName.getQualifierAsString());
-    if (!fs.exists(tableArchivePath) || !fs.getFileStatus(tableArchivePath).isDirectory()) {
-      LOG.debug("Folder tableArchivePath: " + tableArchivePath.toString() + " does not exists");
-      tableArchivePath = null; // empty table has no archive
-    }
-    return tableArchivePath;
-  }
-
-  /**
    * Given the backup root dir and the backup id, return the log file location for an incremental
    * backup.
    * @param backupRootDir backup root directory
@@ -246,216 +120,6 @@ public class HBackupFileSystem {
   }
 
   /**
-   * Gets region list
-   * @param tableName table name
-   * @return RegionList region list
-   * @throws FileNotFoundException exception
-   * @throws IOException exception
-   */
-
-  public ArrayList<Path> getRegionList(TableName tableName)
-      throws FileNotFoundException, IOException {
-    Path tableArchivePath = this.getTableArchivePath(tableName);
-    ArrayList<Path> regionDirList = new ArrayList<Path>();
-    FileStatus[] children = fs.listStatus(tableArchivePath);
-    for (FileStatus childStatus : children) {
-      // here child refer to each region(Name)
-      Path child = childStatus.getPath();
-      regionDirList.add(child);
-    }
-    return regionDirList;
-  }
-
-  /**
-   * Gets region list
-   * @param tableArchivePath table archive path
-   * @return RegionList region list
-   * @throws FileNotFoundException exception
-   * @throws IOException exception
-   */
-  public ArrayList<Path> getRegionList(Path tableArchivePath) throws FileNotFoundException,
-  IOException {
-    ArrayList<Path> regionDirList = new ArrayList<Path>();
-    FileStatus[] children = fs.listStatus(tableArchivePath);
-    for (FileStatus childStatus : children) {
-      // here child refer to each region(Name)
-      Path child = childStatus.getPath();
-      regionDirList.add(child);
-    }
-    return regionDirList;
-  }
-
-  /**
-   * Counts the number of files in all subdirectories of an HBase tables, i.e. HFiles. And finds the
-   * maximum number of files in one HBase table.
-   * @param tableArchivePath archive path
-   * @return the maximum number of files found in 1 HBase table
-   * @throws IOException exception
-   */
-  public int getMaxNumberOfFilesInSubDir(Path tableArchivePath) throws IOException {
-    int result = 1;
-    ArrayList<Path> regionPathList = this.getRegionList(tableArchivePath);
-    // tableArchivePath = this.getTableArchivePath(tableName);
-
-    if (regionPathList == null || regionPathList.size() == 0) {
-      throw new IllegalStateException("Cannot restore hbase table because directory '"
-          + tableArchivePath + "' is not a directory.");
-    }
-
-    for (Path regionPath : regionPathList) {
-      result = Math.max(result, getNumberOfFilesInDir(regionPath));
-    }
-    return result;
-  }
-
-  /**
-   * Counts the number of files in all subdirectories of an HBase table, i.e. HFiles.
-   * @param regionPath Path to an HBase table directory
-   * @return the number of files all directories
-   * @throws IOException exception
-   */
-  public int getNumberOfFilesInDir(Path regionPath) throws IOException {
-    int result = 0;
-
-    if (!fs.exists(regionPath) || !fs.getFileStatus(regionPath).isDirectory()) {
-      throw new IllegalStateException("Cannot restore hbase table because directory '"
-          + regionPath.toString() + "' is not a directory.");
-    }
-
-    FileStatus[] tableDirContent = fs.listStatus(regionPath);
-    for (FileStatus subDirStatus : tableDirContent) {
-      FileStatus[] colFamilies = fs.listStatus(subDirStatus.getPath());
-      for (FileStatus colFamilyStatus : colFamilies) {
-        FileStatus[] colFamilyContent = fs.listStatus(colFamilyStatus.getPath());
-        result += colFamilyContent.length;
-      }
-    }
-    return result;
-  }
-
-  /**
-   * Duplicate the backup image if it's on local cluster
-   * @see HStore#bulkLoadHFile(String, long)
-   * @see HRegionFileSystem#bulkLoadStoreFile(String familyName, Path srcPath, long seqNum)
-   * @param tableArchivePath archive path
-   * @return the new tableArchivePath
-   * @throws IOException exception
-   */
-  public Path checkLocalAndBackup(Path tableArchivePath) throws IOException {
-    // Move the file if it's on local cluster
-    boolean isCopyNeeded = false;
-
-    FileSystem srcFs = tableArchivePath.getFileSystem(conf);
-    FileSystem desFs = FileSystem.get(conf);
-    if (tableArchivePath.getName().startsWith("/")) {
-      isCopyNeeded = true;
-    } else {
-      // This should match what is done in @see HRegionFileSystem#bulkLoadStoreFile(String, Path,
-      // long)
-      if (srcFs.getUri().equals(desFs.getUri())) {
-        LOG.debug("cluster hold the backup image: " + srcFs.getUri() + "; local cluster node: "
-            + desFs.getUri());
-        isCopyNeeded = true;
-      }
-    }
-    if (isCopyNeeded) {
-      LOG.debug("File " + tableArchivePath + " on local cluster, back it up before restore");
-      if (desFs.exists(restoreTmpPath)) {
-        try {
-          desFs.delete(restoreTmpPath, true);
-        } catch (IOException e) {
-          LOG.debug("Failed to delete path: " + restoreTmpPath
-            + ", need to check whether restore target DFS cluster is healthy");
-        }
-      }
-      FileUtil.copy(srcFs, tableArchivePath, desFs, restoreTmpPath, false, conf);
-      LOG.debug("Copied to temporary path on local cluster: " + restoreTmpPath);
-      tableArchivePath = restoreTmpPath;
-    }
-    return tableArchivePath;
-  }
-
-  /**
-   * Calculate region boundaries and add all the column families to the table descriptor
-   * @param regionDirList region dir list
-   * @return a set of keys to store the boundaries
-   */
-  public byte[][] generateBoundaryKeys(ArrayList<Path> regionDirList)
-      throws FileNotFoundException, IOException {
-    TreeMap<byte[], Integer> map = new TreeMap<byte[], Integer>(Bytes.BYTES_COMPARATOR);
-    // Build a set of keys to store the boundaries
-    byte[][] keys = null;
-    // calculate region boundaries and add all the column families to the table descriptor
-    for (Path regionDir : regionDirList) {
-      LOG.debug("Parsing region dir: " + regionDir);
-      Path hfofDir = regionDir;
-
-      if (!fs.exists(hfofDir)) {
-        LOG.warn("HFileOutputFormat dir " + hfofDir + " not found");
-      }
-
-      FileStatus[] familyDirStatuses = fs.listStatus(hfofDir);
-      if (familyDirStatuses == null) {
-        throw new IOException("No families found in " + hfofDir);
-      }
-
-      for (FileStatus stat : familyDirStatuses) {
-        if (!stat.isDirectory()) {
-          LOG.warn("Skipping non-directory " + stat.getPath());
-          continue;
-        }
-        boolean isIgnore = false;
-        String pathName = stat.getPath().getName();
-        for (String ignore : ignoreDirs) {
-          if (pathName.contains(ignore)) {
-            LOG.warn("Skipping non-family directory" + pathName);
-            isIgnore = true;
-            break;
-          }
-        }
-        if (isIgnore) {
-          continue;
-        }
-        Path familyDir = stat.getPath();
-        LOG.debug("Parsing family dir [" + familyDir.toString() + " in region [" + regionDir + "]");
-        // Skip _logs, etc
-        if (familyDir.getName().startsWith("_") || familyDir.getName().startsWith(".")) {
-          continue;
-        }
-
-        // start to parse hfile inside one family dir
-        Path[] hfiles = FileUtil.stat2Paths(fs.listStatus(familyDir));
-        for (Path hfile : hfiles) {
-          if (hfile.getName().startsWith("_") || hfile.getName().startsWith(".")
-              || StoreFileInfo.isReference(hfile.getName())
-              || HFileLink.isHFileLink(hfile.getName())) {
-            continue;
-          }
-          HFile.Reader reader = HFile.createReader(fs, hfile, new CacheConfig(conf), conf);
-          final byte[] first, last;
-          try {
-            reader.loadFileInfo();
-            first = reader.getFirstRowKey();
-            last = reader.getLastRowKey();
-            LOG.debug("Trying to figure out region boundaries hfile=" + hfile + " first="
-                + Bytes.toStringBinary(first) + " last=" + Bytes.toStringBinary(last));
-
-            // To eventually infer start key-end key boundaries
-            Integer value = map.containsKey(first) ? (Integer) map.get(first) : 0;
-            map.put(first, value + 1);
-            value = map.containsKey(last) ? (Integer) map.get(last) : 0;
-            map.put(last, value - 1);
-          } finally {
-            reader.close();
-          }
-        }
-      }
-    }
-    keys = LoadIncrementalHFiles.inferBoundaries(map);
-    return keys;
-  }
-
-  /**
    * Check whether the backup image path and there is manifest file in the path.
    * @param backupManifestMap If all the manifests are found, then they are put into this map
    * @param tableArray the tables involved

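With the instance state removed, callers use the static helpers directly. A small sketch based on the getTableBackupDir call that appears later in this patch (the root dir and table name are placeholders; the backup id reuses the example value from the javadoc in this file):

String tableBackupDir = HBackupFileSystem.getTableBackupDir(
    "hdfs://backup/root",        // target root dir (placeholder)
    "backup_1396650096738",      // backup id (example value from the javadoc)
    TableName.valueOf("t1_dn"));
Path tableBackupPath = new Path(tableBackupDir);
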
http://git-wip-us.apache.org/repos/asf/hbase/blob/b37cc760/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupClientImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupClientImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupClientImpl.java
deleted file mode 100644
index bf33cfb..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupClientImpl.java
+++ /dev/null
@@ -1,183 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.backup.impl;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Set;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.DoNotRetryIOException;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.BackupClient;
-import org.apache.hadoop.hbase.backup.BackupType;
-import org.apache.hadoop.hbase.backup.BackupClientUtil;
-import org.apache.hadoop.hbase.backup.HBackupFileSystem;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-
-import com.google.common.collect.Lists;
-
-/**
- * Backup HBase tables locally or on a remote cluster Serve as client entry point for the following
- * features: - Full Backup provide local and remote back/restore for a list of tables - Incremental
- * backup to build on top of full backup as daily/weekly backup - Convert incremental backup WAL
- * files into hfiles - Merge several backup images into one(like merge weekly into monthly) - Add
- * and remove table to and from Backup image - Cancel a backup process - Describe information of
- * a backup image
- */
-@InterfaceAudience.Public
-@InterfaceStability.Evolving
-public final class BackupClientImpl implements BackupClient {
-  private static final Log LOG = LogFactory.getLog(BackupClientImpl.class);
-  private Configuration conf;
-  private BackupManager backupManager;
-
-  public BackupClientImpl() {
-  }
-
-  @Override
-  public void setConf(Configuration conf) {
-    this.conf = conf;
-  }
-
-  /**
-   * Prepare and submit Backup request
-   * @param backupId : backup_timestame (something like backup_1398729212626)
-   * @param backupType : full or incremental
-   * @param tableList : tables to be backuped
-   * @param targetRootDir : specified by user
-   * @throws IOException exception
-   */
-  protected void requestBackup(String backupId, BackupType backupType, List<TableName> tableList,
-      String targetRootDir) throws IOException {
-
-    BackupContext backupContext = null;
-
-    HBaseAdmin hbadmin = null;
-    Connection conn = null;
-    try {
-      backupManager = new BackupManager(conf);
-      if (backupType == BackupType.INCREMENTAL) {
-        Set<TableName> incrTableSet = backupManager.getIncrementalBackupTableSet();
-        if (incrTableSet.isEmpty()) {
-          LOG.warn("Incremental backup table set contains no table.\n"
-              + "Use 'backup create full' or 'backup stop' to \n "
-              + "change the tables covered by incremental backup.");
-          throw new DoNotRetryIOException("No table covered by incremental backup.");
-        }
-
-        LOG.info("Incremental backup for the following table set: " + incrTableSet);
-        tableList = Lists.newArrayList(incrTableSet);
-      }
-
-      // check whether table exists first before starting real request
-      if (tableList != null) {
-        ArrayList<TableName> nonExistingTableList = null;
-        conn = ConnectionFactory.createConnection(conf);
-        hbadmin = (HBaseAdmin) conn.getAdmin();
-        for (TableName tableName : tableList) {
-          if (!hbadmin.tableExists(tableName)) {
-            if (nonExistingTableList == null) {
-              nonExistingTableList = new ArrayList<>();
-            }
-            nonExistingTableList.add(tableName);
-          }
-        }
-        if (nonExistingTableList != null) {
-          if (backupType == BackupType.INCREMENTAL ) {
-            LOG.warn("Incremental backup table set contains non-exising table: "
-                + nonExistingTableList);
-          } else {
-            // Throw exception only in full mode - we try to backup non-existing table
-            throw new DoNotRetryIOException("Non-existing tables found in the table list: "
-                + nonExistingTableList);
-          }
-        }
-      }
-
-      // if any target table backup dir already exist, then no backup action taken
-      if (tableList != null) {
-        for (TableName table : tableList) {
-          String targetTableBackupDir =
-              HBackupFileSystem.getTableBackupDir(targetRootDir, backupId, table);
-          Path targetTableBackupDirPath = new Path(targetTableBackupDir);
-          FileSystem outputFs = FileSystem.get(targetTableBackupDirPath.toUri(), conf);
-          if (outputFs.exists(targetTableBackupDirPath)) {
-            throw new DoNotRetryIOException("Target backup directory " + targetTableBackupDir
-              + " exists already.");
-          }
-        }
-      }
-      backupContext =
-          backupManager.createBackupContext(backupId, backupType, tableList, targetRootDir);
-      backupManager.initialize();
-      backupManager.dispatchRequest(backupContext);
-    } catch (BackupException e) {
-      // suppress the backup exception wrapped within #initialize or #dispatchRequest, backup
-      // exception has already been handled normally
-      LOG.error("Backup Exception ", e);
-    } finally {
-      if (hbadmin != null) {
-        hbadmin.close();
-      }
-      if (conn != null) {
-        conn.close();
-      }
-    }
-  }
-
-  @Override
-  public String create(BackupType backupType, List<TableName> tableList, String backupRootPath)
-      throws IOException {
-
-    String backupId = BackupRestoreConstants.BACKUPID_PREFIX + EnvironmentEdgeManager.currentTime();
-    BackupClientUtil.checkTargetDir(backupRootPath, conf);
-
-    // table list specified for backup, trigger backup on specified tables
-    try {
-      requestBackup(backupId, backupType, tableList, backupRootPath);
-    } catch (RuntimeException e) {
-      String errMsg = e.getMessage();
-      if (errMsg != null
-          && (errMsg.startsWith("Non-existing tables found") || errMsg
-              .startsWith("Snapshot is not found"))) {
-        LOG.error(errMsg + ", please check your command");
-        throw e;
-      } else {
-        throw e;
-      }
-    } finally{
-      if(backupManager != null) {
-        backupManager.close();
-      }
-    }
-    return backupId;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/b37cc760/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
index 56e26fa..1789cdf 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
@@ -22,12 +22,14 @@ import java.io.IOException;
 import org.apache.commons.cli.CommandLine;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.hbase.backup.BackupClient;
-import org.apache.hadoop.hbase.backup.BackupRestoreFactory;
+import org.apache.hadoop.hbase.backup.BackupRequest;
 import org.apache.hadoop.hbase.backup.BackupType;
 import org.apache.hadoop.hbase.backup.impl.BackupRestoreConstants.BackupCommand;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 
 import com.google.common.collect.Lists;
 
@@ -108,12 +110,15 @@ public final class BackupCommands {
 
       String tables = (args.length == 3) ? args[2] : null;
 
-      try {
-        BackupClient client = BackupRestoreFactory.getBackupClient(getConf());
-        client.create(BackupType.valueOf(args[0].toUpperCase()),
-          Lists.newArrayList(BackupUtil.parseTableNames(tables)), args[1]);
-      } catch (RuntimeException e) {
-        System.out.println("ERROR: " + e.getMessage());
+      try (Connection conn = ConnectionFactory.createConnection(getConf());
+          Admin admin = conn.getAdmin();) {
+        BackupRequest request = new BackupRequest();
+        request.setBackupType(BackupType.valueOf(args[0].toUpperCase()))
+        .setTableList(Lists.newArrayList(BackupUtil.parseTableNames(tables)))
+        .setTargetRootDir(args[1]);
+        admin.backupTables(request);
+      } catch (IOException e) {
+        System.err.println("ERROR: " + e.getMessage());
         System.exit(-1);
       }
     }

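For reference, this is the client-side path that replaces the removed BackupClient: a BackupRequest handed to Admin.backupTables, as in the hunk above. A condensed sketch (configuration, table name, and target directory are placeholders):

Configuration conf = HBaseConfiguration.create();
try (Connection conn = ConnectionFactory.createConnection(conf);
    Admin admin = conn.getAdmin()) {
  BackupRequest request = new BackupRequest();
  request.setBackupType(BackupType.FULL)
      .setTableList(Lists.newArrayList(TableName.valueOf("t1")))
      .setTargetRootDir("hdfs://backup/root");
  admin.backupTables(request);
}
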
http://git-wip-us.apache.org/repos/asf/hbase/blob/b37cc760/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupContext.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupContext.java
index 1be0c3b..06e66dc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupContext.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupContext.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hbase.backup.impl;
 
 import java.io.IOException;
+import java.io.InputStream;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
@@ -29,7 +30,6 @@ import java.util.Set;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.BackupType;
 import org.apache.hadoop.hbase.backup.HBackupFileSystem;
-import org.apache.hadoop.hbase.backup.impl.BackupHandler.BackupState;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
@@ -77,10 +77,21 @@ public class BackupContext {
     this.totalBytesCopied = totalBytesCopied;
   }
 
+  // backup status flag
+  public static enum BackupState {
+    RUNNING, COMPLETE, FAILED, CANCELLED;
+  }
+
   public void setCancelled(boolean cancelled) {
     this.state = BackupState.CANCELLED;;
   }
 
+  // backup phase
+  // for overall backup (for table list, some table may go online, while some may go offline)
+  protected static enum BackupPhase {
+    SNAPSHOTCOPY, INCREMENTAL_COPY, STORE_MANIFEST;
+  }
+
   // backup id: a timestamp when we request the backup
   private String backupId;
 
@@ -91,10 +102,10 @@ public class BackupContext {
   private String targetRootDir;
 
   // overall backup state
-  private BackupHandler.BackupState state;
+  private BackupState state;
 
   // overall backup phase
-  private BackupHandler.BackupPhase phase;
+  private BackupPhase phase;
 
   // overall backup failure message
   private String failedMsg;
@@ -210,19 +221,19 @@ public class BackupContext {
     return totalBytesCopied;
   }
 
-  public BackupHandler.BackupState getState() {
+  public BackupState getState() {
     return state;
   }
 
-  public void setState(BackupHandler.BackupState flag) {
+  public void setState(BackupState flag) {
     this.state = flag;
   }
 
-  public BackupHandler.BackupPhase getPhase() {
+  public BackupPhase getPhase() {
     return phase;
   }
 
-  public void setPhase(BackupHandler.BackupPhase phase) {
+  public void setPhase(BackupPhase phase) {
     this.phase = phase;
   }
 
@@ -308,7 +319,7 @@ public class BackupContext {
     return null;
   }
 
-  public byte[] toByteArray() throws IOException {
+  BackupProtos.BackupContext toBackupContext() {
     BackupProtos.BackupContext.Builder builder =
         BackupProtos.BackupContext.newBuilder();
     builder.setBackupId(getBackupId());
@@ -332,8 +343,11 @@ public class BackupContext {
     builder.setTargetRootDir(getTargetRootDir());
     builder.setTotalBytesCopied(getTotalBytesCopied());
     builder.setType(BackupProtos.BackupType.valueOf(getType().name()));
-    byte[] data = builder.build().toByteArray();
-    return data;
+    return builder.build();
+  }
+
+  public byte[] toByteArray() throws IOException {
+    return toBackupContext().toByteArray();
   }
 
   private void setBackupStatusMap(Builder builder) {
@@ -343,9 +357,15 @@ public class BackupContext {
   }
 
   public static BackupContext fromByteArray(byte[] data) throws IOException {
+    return fromProto(BackupProtos.BackupContext.parseFrom(data));
+  }
+  
+  public static BackupContext fromStream(final InputStream stream) throws IOException {
+    return fromProto(BackupProtos.BackupContext.parseDelimitedFrom(stream));
+  }
 
+  static BackupContext fromProto(BackupProtos.BackupContext proto) {
     BackupContext context = new BackupContext();
-    BackupProtos.BackupContext proto = BackupProtos.BackupContext.parseFrom(data);
     context.setBackupId(proto.getBackupId());
     context.setBackupStatusMap(toMap(proto.getTableBackupStatusList()));
     context.setEndTs(proto.getEndTs());
@@ -353,13 +373,13 @@ public class BackupContext {
       context.setFailedMsg(proto.getFailedMessage());
     }
     if(proto.hasState()) {
-      context.setState(BackupHandler.BackupState.valueOf(proto.getState().name()));
+      context.setState(BackupContext.BackupState.valueOf(proto.getState().name()));
     }
     if(proto.hasHlogTargetDir()) {
       context.setHlogTargetDir(proto.getHlogTargetDir());
     }
     if(proto.hasPhase()) {
-      context.setPhase(BackupHandler.BackupPhase.valueOf(proto.getPhase().name()));
+      context.setPhase(BackupPhase.valueOf(proto.getPhase().name()));
     }
     if(proto.hasProgress()) {
       context.setProgress(proto.getProgress());

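The context now serializes through a shared toBackupContext() helper; a minimal round-trip sketch of the methods added above (the context and input stream are assumed to already exist):

byte[] serialized = context.toByteArray();
BackupContext restored = BackupContext.fromByteArray(serialized);
// fromStream reads a delimited record, e.g. one written with writeDelimitedTo:
BackupContext next = BackupContext.fromStream(inputStream);
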
http://git-wip-us.apache.org/repos/asf/hbase/blob/b37cc760/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupHandler.java
deleted file mode 100644
index b9d71f6..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupHandler.java
+++ /dev/null
@@ -1,702 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.backup.impl;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.concurrent.Callable;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.BackupRestoreFactory;
-import org.apache.hadoop.hbase.backup.BackupType;
-import org.apache.hadoop.hbase.backup.BackupClientUtil;
-import org.apache.hadoop.hbase.backup.HBackupFileSystem;
-import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage;
-import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.zookeeper.KeeperException.NoNodeException;
-
-/**
- * A Handler to carry the operations of backup progress
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public class BackupHandler implements Callable<Void> {
-  private static final Log LOG = LogFactory.getLog(BackupHandler.class);
-
-  // backup phase
-  // for overall backup (for table list, some table may go online, while some may go offline)
-  protected static enum BackupPhase {
-    REQUEST, SNAPSHOT, PREPARE_INCREMENTAL, SNAPSHOTCOPY, INCREMENTAL_COPY, STORE_MANIFEST;
-  }
-
-  // backup status flag
-  public static enum BackupState {
-    WAITING, RUNNING, COMPLETE, FAILED, CANCELLED;
-  }
-
-  protected final BackupContext backupContext;
-  private final BackupManager backupManager;
-  private final Configuration conf;
-  private final Connection conn;
-
-  public BackupHandler(BackupContext backupContext,
-      BackupManager backupManager, Configuration conf, Connection connection) {
-    this.backupContext = backupContext;
-    this.backupManager = backupManager;
-    this.conf = conf;
-    this.conn = connection;
-  }
-
-  public BackupContext getBackupContext() {
-    return backupContext;
-  }
-
-  @Override
-  public Void call() throws Exception {
-    try(Admin admin = conn.getAdmin()) {
-      // overall backup begin
-      this.beginBackup(backupContext);
-      HashMap<String, Long> newTimestamps = null;
-      // handle full or incremental backup for table or table list
-      if (backupContext.getType() == BackupType.FULL) {
-        String savedStartCode = null;
-        boolean firstBackup = false;
-        // do snapshot for full table backup
-
-        try {
-          savedStartCode = backupManager.readBackupStartCode();
-          firstBackup = savedStartCode == null;
-          if (firstBackup) {
-            // This is our first backup. Let's put some marker on ZK so that we can hold the logs
-            // while we do the backup.
-            backupManager.writeBackupStartCode(0L);
-          }
-          // We roll log here before we do the snapshot. It is possible there is duplicate data
-          // in the log that is already in the snapshot. But if we do it after the snapshot, we
-          // could have data loss.
-          // A better approach is to do the roll log on each RS in the same global procedure as
-          // the snapshot.
-          LOG.info("Execute roll log procedure for full backup ...");
-          admin.execProcedure(LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE,
-            LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_NAME, new HashMap<String, String>());
-          newTimestamps = backupManager.readRegionServerLastLogRollResult();
-          if (firstBackup) {
-            // Updates registered log files
-            // We record ALL old WAL files as registered, because
-            // this is a first full backup in the system and these
-            // files are not needed for next incremental backup
-            List<String> logFiles = BackupUtil.getWALFilesOlderThan(conf, newTimestamps);
-            backupManager.recordWALFiles(logFiles);
-          }
-          this.snapshotForFullBackup(backupContext);
-        } catch (BackupException e) {
-          // fail the overall backup and return
-          this.failBackup(backupContext, e, "Unexpected BackupException : ");
-          return null;
-        }
-
-        // update the faked progress currently for snapshot done
-        updateProgress(backupContext, backupManager, 10, 0);
-        // do snapshot copy
-        try {
-          this.snapshotCopy(backupContext);
-        } catch (Exception e) {
-          // fail the overall backup and return
-          this.failBackup(backupContext, e, "Unexpected BackupException : ");
-          return null;
-        }
-        // Updates incremental backup table set
-        backupManager.addIncrementalBackupTableSet(backupContext.getTables());
-
-      } else if (backupContext.getType() == BackupType.INCREMENTAL) {
-        LOG.debug("For incremental backup, current table set is "
-            + backupManager.getIncrementalBackupTableSet());
-        // do incremental table backup preparation
-        backupContext.setPhase(BackupPhase.PREPARE_INCREMENTAL);
-        // avoid action if has been cancelled
-        if (backupContext.isCancelled()) {
-          return null;
-        }
-        try {
-          IncrementalBackupManager incrBackupManager = new IncrementalBackupManager(backupManager);
-
-          newTimestamps = incrBackupManager.getIncrBackupLogFileList(backupContext);
-        } catch (Exception e) {
-          // fail the overall backup and return
-          this.failBackup(backupContext, e, "Unexpected Exception : ");
-          return null;
-        }
-        // update the faked progress currently for incremental preparation done
-        updateProgress(backupContext, backupManager, 10, 0);
-
-        // do incremental copy
-        try {
-          // copy out the table and region info files for each table
-          BackupUtil.copyTableRegionInfo(backupContext, conf);
-          this.incrementalCopy(backupContext);
-          // Save list of WAL files copied
-          backupManager.recordWALFiles(backupContext.getIncrBackupFileList());
-        } catch (Exception e) {
-          // fail the overall backup and return
-          this.failBackup(backupContext, e, "Unexpected exception doing incremental copy : ");
-          return null;
-        }
-      }
-
-      // set overall backup status: complete. Here we make sure to complete the backup. After this
-      // checkpoint, even if entering cancel process, will let the backup finished
-      backupContext.setState(BackupState.COMPLETE);
-
-      if (backupContext.getType() == BackupType.INCREMENTAL) {
-        // Set the previousTimestampMap which is before this current log roll to the manifest.
-        HashMap<TableName, HashMap<String, Long>> previousTimestampMap =
-            backupManager.readLogTimestampMap();
-        backupContext.setIncrTimestampMap(previousTimestampMap);
-      }
-
-      // The table list in backupContext is good for both full backup and incremental backup.
-      // For incremental backup, it contains the incremental backup table set.
-      backupManager.writeRegionServerLogTimestamp(backupContext.getTables(), newTimestamps);
-
-      HashMap<TableName, HashMap<String, Long>> newTableSetTimestampMap =
-          backupManager.readLogTimestampMap();
-
-      Long newStartCode =
-          BackupClientUtil.getMinValue(BackupUtil.getRSLogTimestampMins(newTableSetTimestampMap));
-      backupManager.writeBackupStartCode(newStartCode);
-
-      // backup complete
-      this.completeBackup(backupContext);
-    } catch (Exception e) {
-      // even during completing backup (#completeBackup(backupContext)), exception may occur, or
-      // exception occur during other process, fail the backup finally
-      this.failBackup(backupContext, e, "Error caught during backup progress: ");
-    }
-    return null;
-  }
-
-  /**
-   * Begin the overall backup.
-   * @param backupContext backup context
-   * @throws IOException exception
-   */
-  private void beginBackup(BackupContext backupContext) throws IOException {
-    // set the start timestamp of the overall backup
-    long startTs = EnvironmentEdgeManager.currentTime();
-    backupContext.setStartTs(startTs);
-    // set overall backup status: ongoing
-    backupContext.setState(BackupState.RUNNING);
-    LOG.info("Backup " + backupContext.getBackupId() + " started at " + startTs + ".");
-
-    backupManager.updateBackupStatus(backupContext);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Backup session " + backupContext.getBackupId() + " has been started.");
-    }
-  }
-
-  /**
-   * Snapshot for full table backup.
-   * @param backupContext backup context
-   * @throws IOException exception
-   */
-  private void snapshotForFullBackup(BackupContext backupContext) throws IOException {
-    LOG.info("HBase snapshot full backup for " + backupContext.getBackupId());
-
-    // avoid action if has been cancelled
-    if (backupContext.isCancelled()) {
-      return;
-    }
-
-    try (Admin admin = conn.getAdmin()) {
-      // we do HBase snapshot for tables in the table list one by one currently
-      for (TableName table : backupContext.getTables()) {
-        // avoid action if it has been cancelled
-        if (backupContext.isCancelled()) {
-          return;
-        }
-
-        HBaseProtos.SnapshotDescription backupSnapshot;
-
-        // wrap a SnapshotDescription for offline/online snapshot
-        backupSnapshot = this.wrapSnapshotDescription(table);
-
-        try {
-          // Kick off snapshot for backup
-          admin.snapshot(backupSnapshot);
-        } catch (Exception e) {
-          LOG.error("Snapshot failed to create " + getMessage(e));
-
-          // currently, we fail the overall backup if any table in the list failed, so throw the
-          // exception out for overall backup failing
-          throw new BackupException("Backup snapshot failed on table " + table, e);
-        }
-
-        // set the snapshot name in BackupStatus of this table, only after snapshot success.
-        backupContext.setSnapshotName(table, backupSnapshot.getName());
-      }
-    }
-  }
-
-  /**
-   * Fail the overall backup.
-   * @param backupContext backup context
-   * @param e exception
-   * @throws Exception exception
-   */
-  private void failBackup(BackupContext backupContext, Exception e, String msg) throws Exception {
-    LOG.error(msg + getMessage(e));
-    // If this is a cancel exception, then we've already cleaned.
-
-    if (this.backupContext.getState().equals(BackupState.CANCELLED)) {
-      return;
-    }
-
-    // set the failure timestamp of the overall backup
-    backupContext.setEndTs(EnvironmentEdgeManager.currentTime());
-
-    // set failure message
-    backupContext.setFailedMsg(e.getMessage());
-
-    // set overall backup status: failed
-    backupContext.setState(BackupState.FAILED);
-
-    // compose the backup failed data
-    String backupFailedData =
-        "BackupId=" + backupContext.getBackupId() + ",startts=" + backupContext.getStartTs()
-        + ",failedts=" + backupContext.getEndTs() + ",failedphase=" + backupContext.getPhase()
-        + ",failedmessage=" + backupContext.getFailedMsg();
-    LOG.error(backupFailedData);
-
-    backupManager.updateBackupStatus(backupContext);
-
-    // if full backup, then delete HBase snapshots if there already have snapshots taken
-    // and also clean up export snapshot log files if exist
-    if (backupContext.getType() == BackupType.FULL) {
-      this.deleteSnapshot(backupContext);
-      this.cleanupExportSnapshotLog();
-    } /*
-     * else { // support incremental backup code in future jira // TODO. See HBASE-14124 }
-     */
-
-    // clean up the uncompleted data at target directory if the ongoing backup has already entered
-    // the copy phase
-    // For incremental backup, DistCp logs will be cleaned with the targetDir.
-    this.cleanupTargetDir();
-
-    LOG.info("Backup " + backupContext.getBackupId() + " failed.");
-  }
-
-  /**
-   * Update the ongoing back token znode with new progress.
-   * @param backupContext backup context
-   * 
-   * @param newProgress progress
-   * @param bytesCopied bytes copied
-   * @throws NoNodeException exception
-   */
-  public static void updateProgress(BackupContext backupContext, BackupManager backupManager,
-      int newProgress, long bytesCopied) throws IOException {
-    // compose the new backup progress data, using fake number for now
-    String backupProgressData = newProgress + "%";
-
-    backupContext.setProgress(newProgress);
-    backupManager.updateBackupStatus(backupContext);
-    LOG.debug("Backup progress data \"" + backupProgressData
-      + "\" has been updated to hbase:backup for " + backupContext.getBackupId());
-  }
-
-  /**
-   * Complete the overall backup.
-   * @param backupContext backup context
-   * @throws Exception exception
-   */
-  private void completeBackup(BackupContext backupContext) throws Exception {
-
-    // set the complete timestamp of the overall backup
-    backupContext.setEndTs(EnvironmentEdgeManager.currentTime());
-    // set overall backup status: complete
-    backupContext.setState(BackupState.COMPLETE);
-    // add and store the manifest for the backup
-    this.addManifest(backupContext);
-
-    // after major steps done and manifest persisted, do convert if needed for incremental backup
-    /* in-fly convert code here, provided by future jira */
-    LOG.debug("in-fly convert code here, provided by future jira");
-
-    // compose the backup complete data
-    String backupCompleteData =
-        this.obtainBackupMetaDataStr(backupContext) + ",startts=" + backupContext.getStartTs()
-        + ",completets=" + backupContext.getEndTs() + ",bytescopied="
-        + backupContext.getTotalBytesCopied();
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Backup " + backupContext.getBackupId() + " finished: " + backupCompleteData);
-    }
-    backupManager.updateBackupStatus(backupContext);
-
-    // when full backup is done:
-    // - delete HBase snapshot
-    // - clean up directories with prefix "exportSnapshot-", which are generated when exporting
-    // snapshots
-    if (backupContext.getType() == BackupType.FULL) {
-      this.deleteSnapshot(backupContext);
-      this.cleanupExportSnapshotLog();
-    } else if (backupContext.getType() == BackupType.INCREMENTAL) {
-      this.cleanupDistCpLog();
-    }
-
-    LOG.info("Backup " + backupContext.getBackupId() + " completed.");
-  }
-
-  /**
-   * Get backup request meta data dir as string.
-   * @param backupContext backup context
-   * @return meta data dir
-   */
-  private String obtainBackupMetaDataStr(BackupContext backupContext) {
-    StringBuffer sb = new StringBuffer();
-    sb.append("type=" + backupContext.getType() + ",tablelist=");
-    for (TableName table : backupContext.getTables()) {
-      sb.append(table + ";");
-    }
-    if (sb.lastIndexOf(";") > 0) {
-      sb.delete(sb.lastIndexOf(";"), sb.lastIndexOf(";") + 1);
-    }
-    sb.append(",targetRootDir=" + backupContext.getTargetRootDir());
-
-    return sb.toString();
-  }
-
-  /**
-   * Do snapshot copy.
-   * @param backupContext backup context
-   * @throws Exception exception
-   */
-  private void snapshotCopy(BackupContext backupContext) throws Exception {
-    LOG.info("Snapshot copy is starting.");
-
-    // set overall backup phase: snapshot_copy
-    backupContext.setPhase(BackupPhase.SNAPSHOTCOPY);
-
-    // avoid action if has been cancelled
-    if (backupContext.isCancelled()) {
-      return;
-    }
-
-    // call ExportSnapshot to copy files based on hbase snapshot for backup
-    // ExportSnapshot only support single snapshot export, need loop for multiple tables case
-    BackupCopyService copyService = BackupRestoreFactory.getBackupCopyService(conf);
-
-    // number of snapshots matches number of tables
-    float numOfSnapshots = backupContext.getSnapshotNames().size();
-
-    LOG.debug("There are " + (int) numOfSnapshots + " snapshots to be copied.");
-
-    for (TableName table : backupContext.getTables()) {
-      // Currently we simply set the sub copy tasks by counting the table snapshot number, we can
-      // calculate the real files' size for the percentage in the future.
-      // TODO this below
-      // backupCopier.setSubTaskPercntgInWholeTask(1f / numOfSnapshots);
-      int res = 0;
-      String[] args = new String[4];
-      args[0] = "-snapshot";
-      args[1] = backupContext.getSnapshotName(table);
-      args[2] = "-copy-to";
-      args[3] = backupContext.getBackupStatus(table).getTargetDir();
-
-      LOG.debug("Copy snapshot " + args[1] + " to " + args[3]);
-      res = copyService.copy(backupContext, backupManager, conf, BackupCopyService.Type.FULL, args);
-      // if one snapshot export fails, do not continue with the remaining snapshots
-      if (res != 0) {
-        LOG.error("Exporting snapshot " + args[1] + " failed with return code: " + res + ".");
-
-        throw new IOException("Failed to export snapshot " + args[1] + " to " + args[3]
-            + ", return code " + res);
-      }
-
-      LOG.info("Snapshot copy " + args[1] + " finished.");
-    }
-  }
-
-  /**
-   * Wrap a SnapshotDescription for a target table.
-   * @param tableName the target table
-   * @return a SnapshotDescription tailored for backup.
-   */
-  private SnapshotDescription wrapSnapshotDescription(TableName tableName) {
-    // Mock a SnapshotDescription from backupContext to call SnapshotManager function,
-    // Name it in the format "snapshot_<timestamp>_<namespace>_<table>"
-    HBaseProtos.SnapshotDescription.Builder builder = HBaseProtos.SnapshotDescription.newBuilder();
-    builder.setTable(tableName.getNameAsString());
-    builder.setName("snapshot_" + Long.toString(EnvironmentEdgeManager.currentTime()) + "_"
-        + tableName.getNamespaceAsString() + "_" + tableName.getQualifierAsString());
-    HBaseProtos.SnapshotDescription backupSnapshot = builder.build();
-
-    LOG.debug("Wrapped a SnapshotDescription " + backupSnapshot.getName()
-      + " from backupContext to request snapshot for backup.");
-
-    return backupSnapshot;
-  }
-
-  /**
-   * Delete HBase snapshot for backup.
-   * @param backupCtx backup context
-   * @throws IOException if the snapshot deletion fails
-   */
-  private void deleteSnapshot(BackupContext backupCtx) throws IOException {
-
-    LOG.debug("Trying to delete snapshot for full backup.");
-    Connection conn = null;
-    Admin admin = null;
-    try {
-      conn = ConnectionFactory.createConnection(conf);
-      admin = conn.getAdmin();
-      for (String snapshotName : backupCtx.getSnapshotNames()) {
-        if (snapshotName == null) {
-          continue;
-        }
-        LOG.debug("Trying to delete snapshot: " + snapshotName);
-        admin.deleteSnapshot(snapshotName);
-        LOG.debug("Deleting the snapshot " + snapshotName + " for backup "
-            + backupCtx.getBackupId() + " succeeded.");
-      }
-    } finally {
-      if (admin != null) {
-        admin.close();
-      }
-      if (conn != null) {
-        conn.close();
-      }
-    }
-  }
-
-  /**
-   * Clean up directories with prefix "exportSnapshot-", which are generated when exporting
-   * snapshots.
-   * @throws IOException exception
-   */
-  private void cleanupExportSnapshotLog() throws IOException {
-    FileSystem fs = FSUtils.getCurrentFileSystem(conf);
-    Path stagingDir =
-        new Path(conf.get(BackupRestoreConstants.CONF_STAGING_ROOT, fs.getWorkingDirectory()
-          .toString()));
-    FileStatus[] files = FSUtils.listStatus(fs, stagingDir);
-    if (files == null) {
-      return;
-    }
-    for (FileStatus file : files) {
-      if (file.getPath().getName().startsWith("exportSnapshot-")) {
-        LOG.debug("Delete log files of exporting snapshot: " + file.getPath().getName());
-        if (!FSUtils.delete(fs, file.getPath(), true)) {
-          LOG.warn("Can not delete " + file.getPath());
-        }
-      }
-    }
-  }
-
-  /**
-   * Clean up directories with prefix "_distcp_logs-", which are generated when DistCp copies
-   * hlogs.
-   * @throws IOException exception
-   */
-  private void cleanupDistCpLog() throws IOException {
-    Path rootPath = new Path(backupContext.getHLogTargetDir()).getParent();
-    FileSystem fs = FileSystem.get(rootPath.toUri(), conf);
-    FileStatus[] files = FSUtils.listStatus(fs, rootPath);
-    if (files == null) {
-      return;
-    }
-    for (FileStatus file : files) {
-      if (file.getPath().getName().startsWith("_distcp_logs")) {
-        LOG.debug("Delete log files of DistCp: " + file.getPath().getName());
-        FSUtils.delete(fs, file.getPath(), true);
-      }
-    }
-  }
-
-  /**
-   * Clean up the incomplete data in the target directory if the ongoing backup has already
-   * entered the copy phase.
-   */
-  private void cleanupTargetDir() {
-    try {
-      // clean up the incomplete data in the target directory if the ongoing backup has already
-      // entered the copy phase
-      LOG.debug("Trying to clean up the target dir. Current backup phase: "
-          + backupContext.getPhase());
-      if (backupContext.getPhase().equals(BackupPhase.SNAPSHOTCOPY)
-          || backupContext.getPhase().equals(BackupPhase.INCREMENTAL_COPY)
-          || backupContext.getPhase().equals(BackupPhase.STORE_MANIFEST)) {
-        FileSystem outputFs =
-            FileSystem.get(new Path(backupContext.getTargetRootDir()).toUri(), conf);
-
-        // now treat one backup as a transaction, clean up data that has been partially copied at
-        // table level
-        for (TableName table : backupContext.getTables()) {
-          Path targetDirPath =
-              new Path(HBackupFileSystem.getTableBackupDir(backupContext.getTargetRootDir(),
-                backupContext.getBackupId(), table));
-          if (outputFs.delete(targetDirPath, true)) {
-            LOG.info("Cleaning up uncompleted backup data at " + targetDirPath.toString()
-              + " done.");
-          } else {
-            LOG.info("No data has been copied to " + targetDirPath.toString() + ".");
-          }
-
-          Path tableDir = targetDirPath.getParent();
-          FileStatus[] backups = FSUtils.listStatus(outputFs, tableDir);
-          if (backups == null || backups.length == 0) {
-            outputFs.delete(tableDir, true);
-            LOG.debug(tableDir.toString() + " is empty, remove it.");
-          }
-        }
-      }
-
-    } catch (IOException e1) {
-      LOG.error("Cleaning up uncompleted backup data of " + backupContext.getBackupId() + " at "
-          + backupContext.getTargetRootDir() + " failed due to " + e1.getMessage() + ".");
-    }
-  }
-
-  /**
-   * Add manifest for the current backup. The manifest is stored
-   * within the table backup directory.
-   * @param backupContext The current backup context
-   * @throws IOException exception
-   * @throws BackupException exception
-   */
-  private void addManifest(BackupContext backupContext) throws IOException, BackupException {
-    // set the overall backup phase : store manifest
-    backupContext.setPhase(BackupPhase.STORE_MANIFEST);
-
-    // take no action if the backup has been cancelled
-    if (backupContext.isCancelled()) {
-      return;
-    }
-
-    BackupManifest manifest;
-
-    // Since we have each table's backup in its own directory structure,
-    // we'll store its manifest with the table directory.
-    for (TableName table : backupContext.getTables()) {
-      manifest = new BackupManifest(backupContext, table);
-      ArrayList<BackupImage> ancestors = this.backupManager.getAncestors(backupContext, table);
-      for (BackupImage image : ancestors) {
-        manifest.addDependentImage(image);
-      }
-
-      if (backupContext.getType() == BackupType.INCREMENTAL) {
-        // We'll store the log timestamps for this table only in its manifest.
-        HashMap<TableName, HashMap<String, Long>> tableTimestampMap =
-            new HashMap<TableName, HashMap<String, Long>>();
-        tableTimestampMap.put(table, backupContext.getIncrTimestampMap().get(table));
-        manifest.setIncrTimestampMap(tableTimestampMap);
-      }
-      manifest.store(conf);
-    }
-
-    // For an incremental backup, we store an overall manifest in
-    // <backup-root-dir>/WALs/<backup-id>
-    // This is used when creating the next incremental backup
-    if (backupContext.getType() == BackupType.INCREMENTAL) {
-      manifest = new BackupManifest(backupContext);
-      // set the table region server start and end timestamps for incremental backup
-      manifest.setIncrTimestampMap(backupContext.getIncrTimestampMap());
-      ArrayList<BackupImage> ancestors = this.backupManager.getAncestors(backupContext);
-      for (BackupImage image : ancestors) {
-        manifest.addDependentImage(image);
-      }
-      manifest.store(conf);
-    }
-  }
-
-  /**
-   * Do incremental copy.
-   * @param backupContext backup context
-   */
-  private void incrementalCopy(BackupContext backupContext) throws Exception {
-
-    LOG.info("Incremental copy is starting.");
-
-    // set overall backup phase: incremental_copy
-    backupContext.setPhase(BackupPhase.INCREMENTAL_COPY);
-
-    // take no action if the backup has been cancelled
-    if (backupContext.isCancelled()) {
-      return;
-    }
-
-    // get the incremental backup file list and prepare params for DistCp
-    List<String> incrBackupFileList = backupContext.getIncrBackupFileList();
-    // filter missing files out (they have been copied by previous backups)
-    incrBackupFileList = filterMissingFiles(incrBackupFileList);
-    String[] strArr = incrBackupFileList.toArray(new String[incrBackupFileList.size() + 1]);
-    strArr[strArr.length - 1] = backupContext.getHLogTargetDir();
-
-    BackupCopyService copyService = BackupRestoreFactory.getBackupCopyService(conf);
-    int res = copyService.copy(backupContext, backupManager, conf,
-      BackupCopyService.Type.INCREMENTAL, strArr);
-
-    if (res != 0) {
-      LOG.error("Copying incremental log files failed with return code: " + res + ".");
-      throw new IOException("Failed Hadoop Distributed Copy (DistCp) from " + incrBackupFileList + " to "
-          + backupContext.getHLogTargetDir());
-    }
-    LOG.info("Incremental copy from " + incrBackupFileList + " to "
-        + backupContext.getHLogTargetDir() + " finished.");
-
-  }
-
-  private List<String> filterMissingFiles(List<String> incrBackupFileList) throws IOException {
-    FileSystem fs = FileSystem.get(conf);
-    List<String> list = new ArrayList<String>();
-    for (String file : incrBackupFileList) {
-      if (fs.exists(new Path(file))) {
-        list.add(file);
-      } else {
-        LOG.warn("Can't find file: " + file);
-      }
-    }
-    return list;
-  }
-
-  private String getMessage(Exception e) {
-    String msg = e.getMessage();
-    if (msg == null || msg.equals("")) {
-      msg = e.getClass().getName();
-    }
-    return msg;
-  }
-}
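For orientation: the snapshotCopy method removed above drives one ExportSnapshot run per table, handing the snapshot name and the per-table target directory to the copy service as command-line style arguments. The standalone sketch below only illustrates how that argument array is assembled; the snapshot name and path are hypothetical, and the real code obtains a BackupCopyService from BackupRestoreFactory and checks its return code instead of printing anything.

    public class SnapshotCopyArgsSketch {
      public static void main(String[] args) {
        // hypothetical inputs; the removed code reads these from the BackupContext
        String snapshotName = "snapshot_1459548000000_default_usertable";
        String targetDir = "hdfs://backup/backup_1459548000000/default/usertable";

        // same four-argument layout the removed snapshotCopy builds for each table
        String[] exportArgs = { "-snapshot", snapshotName, "-copy-to", targetDir };

        // a non-zero return code from the copy service aborts the remaining exports
        System.out.println(String.join(" ", exportArgs));
      }
    }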

http://git-wip-us.apache.org/repos/asf/hbase/blob/b37cc760/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java
index a4b0a0a..b4d47d3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java
@@ -44,7 +44,7 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.BackupType;
 import org.apache.hadoop.hbase.backup.HBackupFileSystem;
-import org.apache.hadoop.hbase.backup.impl.BackupHandler.BackupState;
+import org.apache.hadoop.hbase.backup.impl.BackupContext.BackupState;
 import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage;
 import org.apache.hadoop.hbase.backup.impl.BackupUtil.BackupCompleteData;
 import org.apache.hadoop.hbase.backup.master.BackupLogCleaner;
@@ -204,7 +204,7 @@ public class BackupManager implements Closeable {
       throw new BackupException("Wrong backup request parameter: target backup root directory");
     }
 
-    if (type == BackupType.FULL && tableList == null) {
+    if (type == BackupType.FULL && (tableList == null || tableList.isEmpty())) {
       // If the table list is null for a full backup, back up all tables: fill the table list
       // with all user tables from meta. If no table is available, throw the request exception.
 
@@ -270,37 +270,8 @@ public class BackupManager implements Closeable {
     ((ThreadPoolExecutor) pool).allowCoreThreadTimeOut(true);
   }
 
-  /**
-   * Dispatch and handle a backup request.
-   * @param backupContext backup context
-   * @throws BackupException exception
-   */
-  public void dispatchRequest(BackupContext backupContext) throws BackupException {
-
+  public void setBackupContext(BackupContext backupContext) {
     this.backupContext = backupContext;
-
-    LOG.info("Got a backup request: " + "Type: " + backupContext.getType() + "; Tables: "
-        + backupContext.getTableNames() + "; TargetRootDir: " + backupContext.getTargetRootDir());
-
-    // dispatch the request to a backup handler and put it handler map
-
-    BackupHandler handler = new BackupHandler(this.backupContext, this, conf, this.conn);
-    Future<Void> future = this.pool.submit(handler);
-    // wait for the execution to complete
-    try {
-      future.get();
-    } catch (InterruptedException e) {
-      throw new BackupException(e);
-    } catch (CancellationException e) {
-      throw new BackupException(e);
-    } catch (ExecutionException e) {
-      throw new BackupException(e);
-    }
-
-    // mark the backup complete for exit handler's processing
-    backupComplete = true;
-
-    LOG.info("Backup request " + backupContext.getBackupId() + " has been executed.");
   }
 
   /**
@@ -476,7 +447,7 @@ public class BackupManager implements Closeable {
    * @throws IOException exception
    */
   public Set<TableName> getIncrementalBackupTableSet() throws IOException {
-    return systemTable.getIncrementalBackupTableSet();
+    return BackupSystemTableHelper.getIncrementalBackupTableSet(getConnection());
   }
 
   /**
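With dispatchRequest gone, BackupManager no longer submits a BackupHandler to its thread pool and blocks on the future; a caller is expected to attach the context with setBackupContext and drive the backup steps itself. A minimal sketch of that hand-off, assuming the caller already holds a BackupManager and a populated BackupContext (the method name runStep is hypothetical):

    // minimal sketch; BackupManager and BackupContext come from
    // org.apache.hadoop.hbase.backup.impl, error handling omitted
    void runStep(BackupManager backupManager, BackupContext backupContext) {
      // replaces the old synchronous dispatchRequest(backupContext) call
      backupManager.setBackupContext(backupContext);
      // ... the individual backup phases are executed by the caller from here on
    }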

http://git-wip-us.apache.org/repos/asf/hbase/blob/b37cc760/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSnapshotCopy.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSnapshotCopy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSnapshotCopy.java
index 8b8a83f..731ccd3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSnapshotCopy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSnapshotCopy.java
@@ -22,19 +22,13 @@ import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
 
 /* this class will be extended in future jira to support progress report */
 public class BackupSnapshotCopy extends ExportSnapshot {
-  private BackupHandler backupHandler;
   private String table;
 
-  public BackupSnapshotCopy(BackupHandler backupHandler, String table) {
+  public BackupSnapshotCopy(String table) {
     super();
-    this.backupHandler = backupHandler;
     this.table = table;
   }
 
-  public BackupHandler getBackupHandler() {
-    return this.backupHandler;
-  }
-
   public String getTable() {
     return this.table;
   }
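BackupSnapshotCopy now carries only the table name it is exporting a snapshot for; the BackupHandler reference is gone along with the handler itself. A one-line usage sketch with a hypothetical table name:

    // BackupSnapshotCopy extends ExportSnapshot; "usertable" is a hypothetical table name
    BackupSnapshotCopy copy = new BackupSnapshotCopy("usertable");
    String table = copy.getTable();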

http://git-wip-us.apache.org/repos/asf/hbase/blob/b37cc760/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
index f4d6761..c104dd8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
@@ -36,7 +36,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.impl.BackupHandler.BackupState;
+import org.apache.hadoop.hbase.backup.impl.BackupContext.BackupState;
 import org.apache.hadoop.hbase.backup.impl.BackupUtil.BackupCompleteData;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
@@ -412,32 +412,6 @@ public final class BackupSystemTable implements Closeable {
   }
 
   /**
-   * Return the current tables covered by incremental backup.
-   * @return set of tableNames
-   * @throws IOException exception
-   */
-  public Set<TableName> getIncrementalBackupTableSet() throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("get incr backup table set from hbase:backup");
-    }
-    TreeSet<TableName> set = new TreeSet<>();
-
-    try (Table table = connection.getTable(tableName)) {
-      Get get = BackupSystemTableHelper.createGetForIncrBackupTableSet();
-      Result res = table.get(get);
-      if (res.isEmpty()) {
-        return set;
-      }
-      List<Cell> cells = res.listCells();
-      for (Cell cell : cells) {
-        // qualifier = table name - we use table names as qualifiers
-        set.add(TableName.valueOf(CellUtil.cloneQualifier(cell)));
-      }
-      return set;
-    }
-  }
-
-  /**
    * Add tables to global incremental backup set
    * @param tables - set of tables
    * @throws IOException exception
@@ -445,6 +419,9 @@ public final class BackupSystemTable implements Closeable {
   public void addIncrementalBackupTableSet(Set<TableName> tables) throws IOException {
     if (LOG.isDebugEnabled()) {
       LOG.debug("add incr backup table set to hbase:backup");
+      for (TableName table : tables) {
+        LOG.debug(table);
+      }
     }
     try (Table table = connection.getTable(tableName)) {
       Put put = BackupSystemTableHelper.createPutForIncrBackupTableSet(tables);

http://git-wip-us.apache.org/repos/asf/hbase/blob/b37cc760/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTableHelper.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTableHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTableHelper.java
index ac096b7..04ccbdc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTableHelper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTableHelper.java
@@ -22,17 +22,22 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 import java.util.Set;
+import java.util.TreeSet;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.util.Bytes;
 
 
@@ -43,7 +48,7 @@ import org.apache.hadoop.hbase.util.Bytes;
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public final class BackupSystemTableHelper {
-
+  private static final Log LOG = LogFactory.getLog(BackupSystemTableHelper.class);
   /**
    * hbase:backup schema:
    * 1. Backup sessions rowkey= "session." + backupId; value = serialized
@@ -157,6 +162,33 @@ public final class BackupSystemTableHelper {
   }
 
   /**
+   * Return the current tables covered by incremental backup.
+   * @return set of tableNames
+   * @throws IOException exception
+   */
+  public static Set<TableName> getIncrementalBackupTableSet(Connection connection)
+      throws IOException {
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("get incr backup table set from hbase:backup");
+    }
+    TreeSet<TableName> set = new TreeSet<>();
+
+    try (Table table = connection.getTable(TableName.BACKUP_TABLE_NAME)) {
+      Get get = BackupSystemTableHelper.createGetForIncrBackupTableSet();
+      Result res = table.get(get);
+      if (res.isEmpty()) {
+        return set;
+      }
+      List<Cell> cells = res.listCells();
+      for (Cell cell : cells) {
+        // qualifier = table name - we use table names as qualifiers
+        set.add(TableName.valueOf(CellUtil.cloneQualifier(cell)));
+      }
+      return set;
+    }
+  }
+
+  /**
    * Creates Put to store incremental backup table set
    * @param tables tables
    * @return put operation
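The incremental-backup table set lookup added above is now a static helper keyed off a Connection rather than a method on BackupSystemTable. A minimal usage sketch, assuming the caller already holds an open Connection to a cluster where the hbase:backup table exists:

    import java.io.IOException;
    import java.util.Set;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.backup.impl.BackupSystemTableHelper;
    import org.apache.hadoop.hbase.client.Connection;

    public class IncrementalTableSetSketch {
      // returns the tables currently covered by incremental backup; error handling left to the caller
      static Set<TableName> incrementalBackupTables(Connection connection) throws IOException {
        return BackupSystemTableHelper.getIncrementalBackupTableSet(connection);
      }
    }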

http://git-wip-us.apache.org/repos/asf/hbase/blob/b37cc760/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupUtil.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupUtil.java
index 96812c9..660a14f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupUtil.java
@@ -18,11 +18,9 @@
 
 package org.apache.hadoop.hbase.backup.impl;
 
-import java.io.FileNotFoundException;
 import java.io.IOException;
-import java.net.URLDecoder;
+import java.io.InterruptedIOException;
 import java.util.ArrayList;
-import java.util.Collections;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
@@ -35,25 +33,27 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
-import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.HBackupFileSystem;
+import org.apache.hadoop.hbase.backup.BackupClientUtil;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
-import org.apache.hadoop.hbase.backup.BackupClientUtil;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
+import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.wal.DefaultWALProvider;
@@ -71,6 +71,40 @@ public final class BackupUtil {
     throw new AssertionError("Instantiating utility class...");
   }
 
+  public static void waitForSnapshot(SnapshotDescription snapshot, long max,
+      SnapshotManager snapshotMgr, Configuration conf) throws IOException {
+    boolean done = false;
+    long start = EnvironmentEdgeManager.currentTime();
+    int numRetries = conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
+      HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
+    long maxPauseTime = max / numRetries;
+    int tries = 0;
+    LOG.debug("Waiting a max of " + max + " ms for snapshot '" +
+        ClientSnapshotDescriptionUtils.toString(snapshot) + "' to complete. (max " +
+        maxPauseTime + " ms per retry)");
+    while (tries == 0
+        || ((EnvironmentEdgeManager.currentTime() - start) < max && !done)) {
+      try {
+        // sleep a backoff <= pauseTime amount
+        long pause = conf.getLong(HConstants.HBASE_CLIENT_PAUSE,
+          HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
+        long sleep = HBaseAdmin.getPauseTime(tries++, pause);
+        sleep = sleep > maxPauseTime ? maxPauseTime : sleep;
+        LOG.debug("(#" + tries + ") Sleeping: " + sleep +
+          "ms while waiting for snapshot completion.");
+        Thread.sleep(sleep);
+      } catch (InterruptedException e) {
+        throw (InterruptedIOException)new InterruptedIOException("Interrupted").initCause(e);
+      }
+      LOG.debug("Getting current status of snapshot ...");
+      done = snapshotMgr.isSnapshotDone(snapshot);
+    }
+    if (!done) {
+      throw new SnapshotCreationException("Snapshot '" + snapshot.getName()
+          + "' was not completed in the expected time: " + max + " ms", snapshot);
+    }
+  }
+
   /**
    * Loop through the RS log timestamp map for the tables, for each RS, find the min timestamp
    * value for the RS among the tables.
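The waitForSnapshot helper added above polls SnapshotManager.isSnapshotDone with a growing sleep, but caps every sleep at max / numRetries so the total wait stays bounded by max. The standalone sketch below only illustrates that capping arithmetic; the retry count, base pause, and growth rule are illustrative stand-ins, not the configured defaults or HBaseAdmin.getPauseTime itself:

    public class SnapshotWaitBackoffSketch {
      public static void main(String[] args) {
        long max = 60_000L;      // overall wait budget in ms (illustrative)
        int numRetries = 35;     // illustrative; the real code reads hbase.client.retries.number
        long basePause = 100L;   // illustrative; the real code reads hbase.client.pause
        long maxPauseTime = max / numRetries;

        long elapsed = 0;
        for (int tries = 0; elapsed < max; tries++) {
          // stand-in for HBaseAdmin.getPauseTime: grow the pause with the attempt, then cap it
          long sleep = Math.min(basePause * (tries + 1), maxPauseTime);
          elapsed += sleep;
          System.out.println("attempt " + (tries + 1) + ": sleep " + sleep
              + " ms, elapsed " + elapsed + " ms");
          // the real loop would call snapshotMgr.isSnapshotDone(snapshot) here and stop when true
        }
      }
    }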
@@ -139,7 +173,6 @@ public final class BackupUtil {
       descriptors.createTableDescriptorForTableDirectory(target, orig, false);
       LOG.debug("Finished copying tableinfo.");
 
-      HBaseAdmin hbadmin = null;
       // TODO: optimize
       List<HRegionInfo> regions = null;
       try(Connection conn = ConnectionFactory.createConnection(conf);

