hbase-commits mailing list archives

From e...@apache.org
Subject [48/50] [abbrv] hbase git commit: HBASE-14030 HBase Backup/Restore Phase 1 (v42)
Date Tue, 22 Mar 2016 02:42:17 GMT
http://git-wip-us.apache.org/repos/asf/hbase/blob/ab491d4a/hbase-protocol/src/main/protobuf/Backup.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/protobuf/Backup.proto b/hbase-protocol/src/main/protobuf/Backup.proto
new file mode 100644
index 0000000..383b990
--- /dev/null
+++ b/hbase-protocol/src/main/protobuf/Backup.proto
@@ -0,0 +1,105 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// This file contains Backup manifest messages
+package hbase.pb;
+
+option java_package = "org.apache.hadoop.hbase.protobuf.generated";
+option java_outer_classname = "BackupProtos";
+option java_generic_services = true;
+option java_generate_equals_and_hash = true;
+option optimize_for = SPEED;
+
+import "HBase.proto";
+
+enum BackupType {
+  FULL = 0;
+  INCREMENTAL = 1;
+}
+
+message BackupImage {
+  required string backup_id = 1;
+  required BackupType backup_type = 2;
+  required string root_dir = 3;
+  repeated TableName table_list = 4;
+  required uint64 start_ts = 5;
+  required uint64 complete_ts = 6;
+  repeated BackupImage ancestors = 7; 
+}
+
+message ServerTimestamp {
+  required string server = 1;
+  required uint64 timestamp = 2;
+}
+
+message TableServerTimestamp {
+  required TableName table = 1;
+  repeated ServerTimestamp server_timestamp = 2;
+}
+
+message BackupManifest {
+  required string version = 1;
+  required string backup_id = 2;
+  required BackupType type = 3;
+  repeated TableName table_list = 4;
+  required uint64 start_ts = 5;
+  required uint64 complete_ts = 6;
+  required int64 total_bytes = 7;
+  optional int64 log_bytes  = 8;
+  repeated TableServerTimestamp tst_map = 9;
+  repeated BackupImage dependent_backup_image = 10;
+  required bool compacted = 11; 
+}
+
+message TableBackupStatus {
+  required TableName table = 1;
+  required string target_dir = 2;
+  optional string snapshot = 3; 	
+}
+
+message BackupContext {
+  required string backup_id = 1;
+  required BackupType type = 2;
+  required string target_root_dir = 3;
+  optional BackupState state = 4;
+  optional BackupPhase phase = 5;
+  optional string failed_message = 6;
+  repeated TableBackupStatus table_backup_status = 7;
+  optional uint64  start_ts = 8;
+  optional uint64  end_ts = 9;
+  optional int64  total_bytes_copied = 10;
+  optional string hlog_target_dir = 11;
+  optional uint32 progress = 12; 
+  
+  enum BackupState {
+    WAITING = 0;
+    RUNNING = 1;
+    COMPLETE = 2;
+    FAILED = 3;
+    CANCELLED = 4;
+  }
+
+  enum BackupPhase {
+    REQUEST = 0;
+    SNAPSHOT = 1;
+    PREPARE_INCREMENTAL = 2;
+    SNAPSHOTCOPY = 3;
+    INCREMENTAL_COPY = 4;
+    STORE_MANIFEST = 5;
+  } 
+}
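
For illustration only (not part of this patch): a minimal Java sketch of how the messages generated from Backup.proto might be built, assuming the standard protoc Java codegen for the options above and the TableName message defined in HBase.proto.

import com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.protobuf.generated.BackupProtos;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;

public class BackupImageExample {
  public static BackupProtos.BackupImage buildImage() {
    // TableName is defined in HBase.proto (namespace + qualifier as bytes).
    HBaseProtos.TableName table = HBaseProtos.TableName.newBuilder()
        .setNamespace(ByteString.copyFromUtf8("default"))
        .setQualifier(ByteString.copyFromUtf8("t1_dn"))
        .build();
    // All required fields of BackupImage must be set before build().
    return BackupProtos.BackupImage.newBuilder()
        .setBackupId("backup_1396650096738")
        .setBackupType(BackupProtos.BackupType.FULL)
        .setRootDir("hdfs://backup.hbase.org:9000/user/biadmin/backup1")
        .addTableList(table)
        .setStartTs(1396650096738L)
        .setCompleteTs(1396650097621L)
        .build();
  }
}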

http://git-wip-us.apache.org/repos/asf/hbase/blob/ab491d4a/hbase-server/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-server/pom.xml b/hbase-server/pom.xml
index d5f1e30..e4b296a 100644
--- a/hbase-server/pom.xml
+++ b/hbase-server/pom.xml
@@ -394,6 +394,11 @@
        <version>${project.version}</version>
        <optional>true</optional>
     </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-distcp</artifactId>
+      <version>${hadoop-two.version}</version>
+    </dependency>
     <dependency>
       <groupId>commons-httpclient</groupId>
       <artifactId>commons-httpclient</artifactId>
@@ -407,6 +412,11 @@
       <artifactId>commons-collections</artifactId>
     </dependency>
     <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-distcp</artifactId>
+      <version>${hadoop-two.version}</version>
+    </dependency>
+    <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-hadoop-compat</artifactId>
     </dependency>

http://git-wip-us.apache.org/repos/asf/hbase/blob/ab491d4a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupClient.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupClient.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupClient.java
new file mode 100644
index 0000000..7c8ea39
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupClient.java
@@ -0,0 +1,40 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.backup;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.TableName;
+
+public interface BackupClient {
+
+  public void setConf(Configuration conf);
+
+  /**
+   * Send backup request to server, and monitor the progress if necessary
+   * @param backupType : full or incremental
+   * @param targetRootDir : the root path specified by user
+   * @param tableList : the table list specified by user
+   * @return backupId backup id
+   * @throws IOException exception
+   */
+  public String create(BackupType backupType, List<TableName> tableList,
+      String targetRootDir) throws IOException;
+}
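
For illustration only (not part of this patch): a minimal sketch of how a caller might request a full backup through this interface, assuming the BackupRestoreFactory and BackupType types added elsewhere in this patch.

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.BackupClient;
import org.apache.hadoop.hbase.backup.BackupRestoreFactory;
import org.apache.hadoop.hbase.backup.BackupType;
import com.google.common.collect.Lists;

public class BackupClientUsage {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // BackupRestoreFactory (added later in this patch) returns the configured
    // implementation, BackupClientImpl by default.
    BackupClient client = BackupRestoreFactory.getBackupClient(conf);
    List<TableName> tables = Lists.newArrayList(TableName.valueOf("t1_dn"));
    String backupId = client.create(BackupType.FULL, tables,
        "hdfs://backup.hbase.org:9000/user/biadmin/backup1");
    System.out.println("Created backup " + backupId);
  }
}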

http://git-wip-us.apache.org/repos/asf/hbase/blob/ab491d4a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java
new file mode 100644
index 0000000..015c80b
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java
@@ -0,0 +1,119 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.backup;
+
+import java.io.IOException;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
+import org.apache.commons.cli.PosixParser;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.backup.impl.BackupCommands;
+import org.apache.hadoop.hbase.backup.impl.BackupRestoreConstants;
+import org.apache.hadoop.hbase.backup.impl.BackupRestoreConstants.BackupCommand;
+import org.apache.hadoop.hbase.util.AbstractHBaseTool;
+import org.apache.hadoop.hbase.util.LogUtils;
+import org.apache.hadoop.util.ToolRunner;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+
+public class BackupDriver extends AbstractHBaseTool {
+
+  private static final Log LOG = LogFactory.getLog(BackupDriver.class);
+  private Options opt;
+  private CommandLine cmd;
+
+  protected void init() throws IOException {
+    // define supported options
+    opt = new Options();
+    opt.addOption("debug", false, "Enable debug loggings");
+
+    // disable irrelevant loggers so they do not clutter command output
+    LogUtils.disableUselessLoggers(LOG);
+  }
+
+  private int parseAndRun(String[] args) throws IOException {
+    String cmd = null;
+    String[] remainArgs = null;
+    if (args == null || args.length == 0) {
+      BackupCommands.createCommand(getConf(),
+        BackupRestoreConstants.BackupCommand.HELP, null).execute();
+    } else {
+      cmd = args[0];
+      remainArgs = new String[args.length - 1];
+      if (args.length > 1) {
+        System.arraycopy(args, 1, remainArgs, 0, args.length - 1);
+      }
+    }
+    CommandLine cmdline = null;
+    try {
+      cmdline = new PosixParser().parse(opt, remainArgs);
+    } catch (ParseException e) {
+      LOG.error("Could not parse command", e);
+      return -1;
+    }
+
+    BackupCommand type = BackupCommand.HELP;
+    if (BackupCommand.CREATE.name().equalsIgnoreCase(cmd)) {
+      type = BackupCommand.CREATE;
+    } else if (BackupCommand.HELP.name().equalsIgnoreCase(cmd)) {
+      type = BackupCommand.HELP;
+    } else {
+      System.out.println("Unsupported command for backup: " + cmd);
+      return -1;
+    }
+
+    // enable debug logging
+    Logger backupClientLogger = Logger.getLogger("org.apache.hadoop.hbase.backup");
+    if (cmdline.hasOption("debug")) {
+      backupClientLogger.setLevel(Level.DEBUG);
+    } else {
+      backupClientLogger.setLevel(Level.INFO);
+    }
+
+    // TODO: get rid of Command altogether?
+    BackupCommands.createCommand(getConf(), type, cmdline).execute();
+    return 0;
+  }
+
+  @Override
+  protected void addOptions() {
+  }
+
+  @Override
+  protected void processOptions(CommandLine cmd) {
+    this.cmd = cmd;
+  }
+
+  @Override
+  protected int doWork() throws Exception {
+    init();
+    return parseAndRun(cmd.getArgs());
+  }
+
+  public static void main(String[] args) throws Exception {
+    Configuration conf = HBaseConfiguration.create();
+    int ret = ToolRunner.run(conf, new BackupDriver(), args);
+    System.exit(ret);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/ab491d4a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreFactory.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreFactory.java
new file mode 100644
index 0000000..6fbfe18
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreFactory.java
@@ -0,0 +1,95 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.backup;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.backup.impl.BackupClientImpl;
+import org.apache.hadoop.hbase.backup.impl.BackupCopyService;
+import org.apache.hadoop.hbase.backup.impl.IncrementalRestoreService;
+import org.apache.hadoop.hbase.backup.impl.RestoreClientImpl;
+import org.apache.hadoop.hbase.backup.mapreduce.MapReduceBackupCopyService;
+import org.apache.hadoop.hbase.backup.mapreduce.MapReduceRestoreService;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.util.ReflectionUtils;
+
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public final class BackupRestoreFactory {
+
+  public final static String HBASE_INCR_RESTORE_IMPL_CLASS = "hbase.incremental.restore.class";
+  public final static String HBASE_BACKUP_COPY_IMPL_CLASS = "hbase.backup.copy.class";
+  public final static String HBASE_BACKUP_CLIENT_IMPL_CLASS = "hbase.backup.client.class";
+  public final static String HBASE_RESTORE_CLIENT_IMPL_CLASS = "hbase.restore.client.class";
+
+  private BackupRestoreFactory(){
+    throw new AssertionError("Instantiating utility class...");
+  }
+  
+  /**
+   * Gets incremental restore service
+   * @param conf - configuration
+   * @return incremental restore service instance
+   */
+  public static IncrementalRestoreService getIncrementalRestoreService(Configuration conf) {
+    Class<? extends IncrementalRestoreService> cls =
+        conf.getClass(HBASE_INCR_RESTORE_IMPL_CLASS, MapReduceRestoreService.class,
+          IncrementalRestoreService.class);
+    return ReflectionUtils.newInstance(cls, conf);
+  }
+  
+  /**
+   * Gets backup copy service
+   * @param conf - configuration
+   * @return backup copy service
+   */
+  public static BackupCopyService getBackupCopyService(Configuration conf) {
+    Class<? extends BackupCopyService> cls =
+        conf.getClass(HBASE_BACKUP_COPY_IMPL_CLASS, MapReduceBackupCopyService.class,
+          BackupCopyService.class);
+    return ReflectionUtils.newInstance(cls, conf);
+  }
+  
+  /**
+   * Gets backup client implementation
+   * @param conf - configuration
+   * @return backup client
+   */
+  public static BackupClient getBackupClient(Configuration conf) {
+    Class<? extends BackupClient> cls =
+        conf.getClass(HBASE_BACKUP_CLIENT_IMPL_CLASS, BackupClientImpl.class,
+          BackupClient.class);
+    BackupClient client = ReflectionUtils.newInstance(cls, conf);
+    client.setConf(conf);
+    return client;
+  }
+  
+  /**
+   * Gets restore client implementation
+   * @param conf - configuration
+   * @return restore client
+   */
+  public static RestoreClient getRestoreClient(Configuration conf) {
+    Class<? extends RestoreClient> cls =
+        conf.getClass(HBASE_RESTORE_CLIENT_IMPL_CLASS, RestoreClientImpl.class,
+          RestoreClient.class);
+    RestoreClient client = ReflectionUtils.newInstance(cls, conf);
+    client.setConf(conf);
+    return client;
+  }
+}
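
For illustration only (not part of this patch): a sketch of how the factory resolves a service implementation from the configuration keys above, and how a custom implementation could be plugged in.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.backup.BackupRestoreFactory;
import org.apache.hadoop.hbase.backup.impl.BackupCopyService;

public class CopyServiceLookupExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // By default MapReduceBackupCopyService is returned. To plug in a custom
    // implementation, set its class name under HBASE_BACKUP_COPY_IMPL_CLASS, e.g.:
    //   conf.set(BackupRestoreFactory.HBASE_BACKUP_COPY_IMPL_CLASS,
    //       "com.example.MyCopyService");  // hypothetical BackupCopyService
    BackupCopyService copyService = BackupRestoreFactory.getBackupCopyService(conf);
    System.out.println("Copy service: " + copyService.getClass().getName());
  }
}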

http://git-wip-us.apache.org/repos/asf/hbase/blob/ab491d4a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java
new file mode 100644
index 0000000..6e5a355
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java
@@ -0,0 +1,472 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.TreeMap;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.impl.BackupManifest;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.io.HFileLink;
+import org.apache.hadoop.hbase.io.hfile.CacheConfig;
+import org.apache.hadoop.hbase.io.hfile.HFile;
+import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
+import org.apache.hadoop.hbase.regionserver.HStore;
+import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
+import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
+import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * View to an on-disk Backup Image FileSystem.
+ * Provides the set of methods necessary to interact with the on-disk Backup Image data.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class HBackupFileSystem {
+  public static final Log LOG = LogFactory.getLog(HBackupFileSystem.class);
+
+  private final String RESTORE_TMP_PATH = "/tmp";
+  private final String[] ignoreDirs = { "recovered.edits" };
+
+  private final Configuration conf;
+  private final FileSystem fs;
+  private final Path backupRootPath;
+  private final Path restoreTmpPath;
+  private final String backupId;
+
+  /**
+   * Create a view to the on-disk Backup Image.
+   * @param conf configuration to use
+   * @param backupRootPath root path where the backup Image is stored
+   * @param backupId id of the backup Image
+   */
+  public HBackupFileSystem(final Configuration conf, final Path backupRootPath, final String backupId)
+      throws IOException {
+    this.conf = conf;
+    this.fs = backupRootPath.getFileSystem(conf);
+    this.backupRootPath = backupRootPath;
+    this.backupId = backupId; // the backup ID for the lead backup Image
+    this.restoreTmpPath = new Path(conf.get("hbase.fs.tmp.dir") != null?
+          conf.get("hbase.fs.tmp.dir"): RESTORE_TMP_PATH,
+        "restore");
+  }
+
+  public Path getBackupRootPath() {
+    return backupRootPath;
+  }
+
+  public String getBackupId() {
+    return backupId;
+  }
+
+  /**
+   * @param tableName name of the table that was backed up
+   * @return {@link HTableDescriptor} saved in backup image of the table
+   */
+  public HTableDescriptor getTableDesc(TableName tableName)
+      throws FileNotFoundException, IOException {
+    Path tableInfoPath = this.getTableInfoPath(tableName);
+    SnapshotDescription desc = SnapshotDescriptionUtils.readSnapshotInfo(fs, tableInfoPath);
+    SnapshotManifest manifest = SnapshotManifest.open(conf, fs, tableInfoPath, desc);
+    HTableDescriptor tableDescriptor = manifest.getTableDescriptor();
+    if (!tableDescriptor.getNameAsString().equals(tableName.getNameAsString())) {
+      LOG.error("couldn't find Table Desc for table: " + tableName + " under tableInfoPath: "
+          + tableInfoPath.toString());
+      LOG.error("tableDescriptor.getNameAsString() = " + tableDescriptor.getNameAsString());
+    }
+    return tableDescriptor;
+  }
+
+  /**
+   * Given the backup root dir, backup id and the table name, return the backup image location,
+   * which is also where the backup manifest file is. The return value looks like:
+   * "hdfs://backup.hbase.org:9000/user/biadmin/backup1/default/t1_dn/backup_1396650096738"
+   * @param backupRootDir backup root directory
+   * @param backupId  backup id
+   * @param tableName table name
+   * @return backup path String for the particular table
+   */
+  public static String getTableBackupDir(String backupRootDir, String backupId,
+      TableName tableName) {
+    return backupRootDir + Path.SEPARATOR + tableName.getNamespaceAsString() + Path.SEPARATOR
+        + tableName.getQualifierAsString() + Path.SEPARATOR + backupId;
+  }
+
+  /**
+   * Given the backup root dir, backup id and the table name, return the backup image location,
+   * which is also where the backup manifest file is. The return value looks like:
+   * "hdfs://backup.hbase.org:9000/user/biadmin/backup1/default/t1_dn/backup_1396650096738"
+   * @param backupRootPath backup root path
+   * @param tableName table name
+   * @param backupId backup Id
+   * @return backupPath for the particular table
+   */
+  public static Path getTableBackupPath(Path backupRootPath, TableName tableName, String backupId) {
+    return new Path(backupRootPath, tableName.getNamespaceAsString() + Path.SEPARATOR
+      + tableName.getQualifierAsString() + Path.SEPARATOR + backupId);
+  }
+
+  /**
+   * The return value represents the path to:
+   * ".../user/biadmin/backup1/default/t1_dn/backup_1396650096738/.hbase-snapshot"
+   * @param backupRootPath backup root path
+   * @param tableName table name
+   * @param backupId backup Id
+   * @return path for snapshot
+   */
+  public static Path getTableSnapshotPath(Path backupRootPath, TableName tableName,
+      String backupId) {
+    return new Path(getTableBackupPath(backupRootPath, tableName, backupId),
+      HConstants.SNAPSHOT_DIR_NAME);
+  }
+
+  /**
+   * The return value represents the path to:
+   * "..../default/t1_dn/backup_1396650096738/.hbase-snapshot/snapshot_1396650097621_default_t1_dn"
+   * In 0.96 and 0.98 this path contains .snapshotinfo and .tabledesc; in trunk it contains
+   * .snapshotinfo and .data.manifest.
+   * @param tableName table name
+   * @return path to table info
+   * @throws FileNotFoundException exception
+   * @throws IOException exception
+   */
+  public Path getTableInfoPath(TableName tableName)
+      throws FileNotFoundException, IOException {
+    Path tableSnapShotPath = getTableSnapshotPath(backupRootPath, tableName, backupId);
+    Path tableInfoPath = null;
+
+    // can't build the path directly as the timestamp values are different
+    FileStatus[] snapshots = fs.listStatus(tableSnapShotPath);
+    for (FileStatus snapshot : snapshots) {
+      tableInfoPath = snapshot.getPath();
+      // SnapshotManifest.DATA_MANIFEST_NAME = "data.manifest";
+      if (tableInfoPath.getName().endsWith("data.manifest")) {
+        break;
+      }
+    }
+    return tableInfoPath;
+  }
+
+  /**
+   * The return value represents the path to:
+   * ".../user/biadmin/backup1/default/t1_dn/backup_1396650096738/archive/data/default/t1_dn"
+   * @param tableName table name
+   * @return path to table archive
+   * @throws IOException exception
+   */
+  public Path getTableArchivePath(TableName tableName)
+      throws IOException {
+    Path baseDir = new Path(getTableBackupPath(backupRootPath, tableName, backupId),
+      HConstants.HFILE_ARCHIVE_DIRECTORY);
+    Path dataDir = new Path(baseDir, HConstants.BASE_NAMESPACE_DIR);
+    Path archivePath = new Path(dataDir, tableName.getNamespaceAsString());
+    Path tableArchivePath =
+        new Path(archivePath, tableName.getQualifierAsString());
+    if (!fs.exists(tableArchivePath) || !fs.getFileStatus(tableArchivePath).isDirectory()) {
+      LOG.debug("Folder tableArchivePath: " + tableArchivePath.toString() + " does not exists");
+      tableArchivePath = null; // empty table has no archive
+    }
+    return tableArchivePath;
+  }
+
+  /**
+   * Given the backup root dir and the backup id, return the log file location for an incremental
+   * backup.
+   * @param backupRootDir backup root directory
+   * @param backupId backup id
+   * @return logBackupDir: ".../user/biadmin/backup1/WALs/backup_1396650096738"
+   */
+  public static String getLogBackupDir(String backupRootDir, String backupId) {
+    return backupRootDir + Path.SEPARATOR + HConstants.HREGION_LOGDIR_NAME + Path.SEPARATOR
+        + backupId;
+  }
+
+  public static Path getLogBackupPath(String backupRootDir, String backupId) {
+    return new Path(getLogBackupDir(backupRootDir, backupId));
+  }
+
+  private static Path getManifestPath(TableName tableName, Configuration conf,
+      Path backupRootPath, String backupId) throws IOException {
+    Path manifestPath = new Path(getTableBackupPath(backupRootPath, tableName, backupId),
+      BackupManifest.MANIFEST_FILE_NAME);
+    FileSystem fs = backupRootPath.getFileSystem(conf);
+    if (!fs.exists(manifestPath)) {
+      // check log dir for incremental backup case
+      manifestPath =
+          new Path(getLogBackupDir(backupRootPath.toString(), backupId) + Path.SEPARATOR
+            + BackupManifest.MANIFEST_FILE_NAME);
+      if (!fs.exists(manifestPath)) {
+        String errorMsg =
+            "Could not find backup manifest for " + backupId + " in " + backupRootPath.toString();
+        throw new IOException(errorMsg);
+      }
+    }
+    return manifestPath;
+  }
+
+  public static BackupManifest getManifest(TableName tableName, Configuration conf,
+      Path backupRootPath, String backupId) throws IOException {
+    BackupManifest manifest = new BackupManifest(conf,
+      getManifestPath(tableName, conf, backupRootPath, backupId));
+    return manifest;
+  }
+
+  /**
+   * Gets region list
+   * @param tableName table name
+   * @return RegionList region list
+   * @throws FileNotFoundException exception
+   * @throws IOException exception
+   */
+
+  public ArrayList<Path> getRegionList(TableName tableName)
+      throws FileNotFoundException, IOException {
+    Path tableArchivePath = this.getTableArchivePath(tableName);
+    ArrayList<Path> regionDirList = new ArrayList<Path>();
+    FileStatus[] children = fs.listStatus(tableArchivePath);
+    for (FileStatus childStatus : children) {
+      // each child here refers to a region (name)
+      Path child = childStatus.getPath();
+      regionDirList.add(child);
+    }
+    return regionDirList;
+  }
+
+  /**
+   * Gets region list
+   * @param tableArchivePath table archive path
+   * @return RegionList region list
+   * @throws FileNotFoundException exception
+   * @throws IOException exception
+   */
+  public ArrayList<Path> getRegionList(Path tableArchivePath) throws FileNotFoundException,
+  IOException {
+    ArrayList<Path> regionDirList = new ArrayList<Path>();
+    FileStatus[] children = fs.listStatus(tableArchivePath);
+    for (FileStatus childStatus : children) {
+      // each child here refers to a region (name)
+      Path child = childStatus.getPath();
+      regionDirList.add(child);
+    }
+    return regionDirList;
+  }
+
+  /**
+   * Counts the number of files (i.e. HFiles) in all subdirectories of an HBase table and finds
+   * the maximum number of files in any single region.
+   * @param tableArchivePath archive path
+   * @return the maximum number of files found in 1 HBase table
+   * @throws IOException exception
+   */
+  public int getMaxNumberOfFilesInSubDir(Path tableArchivePath) throws IOException {
+    int result = 1;
+    ArrayList<Path> regionPathList = this.getRegionList(tableArchivePath);
+    // tableArchivePath = this.getTableArchivePath(tableName);
+
+    if (regionPathList == null || regionPathList.size() == 0) {
+      throw new IllegalStateException("Cannot restore hbase table because directory '"
+          + tableArchivePath + "' is not a directory.");
+    }
+
+    for (Path regionPath : regionPathList) {
+      result = Math.max(result, getNumberOfFilesInDir(regionPath));
+    }
+    return result;
+  }
+
+  /**
+   * Counts the number of files in all subdirectories of an HBase table, i.e. HFiles.
+   * @param regionPath Path to an HBase table directory
+   * @return the number of files in all directories
+   * @throws IOException exception
+   */
+  public int getNumberOfFilesInDir(Path regionPath) throws IOException {
+    int result = 0;
+
+    if (!fs.exists(regionPath) || !fs.getFileStatus(regionPath).isDirectory()) {
+      throw new IllegalStateException("Cannot restore hbase table because directory '"
+          + regionPath.toString() + "' is not a directory.");
+    }
+
+    FileStatus[] tableDirContent = fs.listStatus(regionPath);
+    for (FileStatus subDirStatus : tableDirContent) {
+      FileStatus[] colFamilies = fs.listStatus(subDirStatus.getPath());
+      for (FileStatus colFamilyStatus : colFamilies) {
+        FileStatus[] colFamilyContent = fs.listStatus(colFamilyStatus.getPath());
+        result += colFamilyContent.length;
+      }
+    }
+    return result;
+  }
+
+  /**
+   * Duplicate the backup image if it's on local cluster
+   * @see HStore#bulkLoadHFile(String, long)
+   * @see HRegionFileSystem#bulkLoadStoreFile(String familyName, Path srcPath, long seqNum)
+   * @param tableArchivePath archive path
+   * @return the new tableArchivePath
+   * @throws IOException exception
+   */
+  public Path checkLocalAndBackup(Path tableArchivePath) throws IOException {
+    // Move the file if it's on local cluster
+    boolean isCopyNeeded = false;
+
+    FileSystem srcFs = tableArchivePath.getFileSystem(conf);
+    FileSystem desFs = FileSystem.get(conf);
+    if (tableArchivePath.getName().startsWith("/")) {
+      isCopyNeeded = true;
+    } else {
+      // This should match what is done in @see HRegionFileSystem#bulkLoadStoreFile(String, Path,
+      // long)
+      if (srcFs.getUri().equals(desFs.getUri())) {
+        LOG.debug("cluster hold the backup image: " + srcFs.getUri() + "; local cluster node: "
+            + desFs.getUri());
+        isCopyNeeded = true;
+      }
+    }
+    if (isCopyNeeded) {
+      LOG.debug("File " + tableArchivePath + " on local cluster, back it up before restore");
+      if (desFs.exists(restoreTmpPath)) {
+        try {
+          desFs.delete(restoreTmpPath, true);
+        } catch (IOException e) {
+          LOG.debug("Failed to delete path: " + restoreTmpPath
+            + ", need to check whether restore target DFS cluster is healthy");
+        }
+      }
+      FileUtil.copy(srcFs, tableArchivePath, desFs, restoreTmpPath, false, conf);
+      LOG.debug("Copied to temporary path on local cluster: " + restoreTmpPath);
+      tableArchivePath = restoreTmpPath;
+    }
+    return tableArchivePath;
+  }
+
+  /**
+   * Calculate region boundaries and add all the column families to the table descriptor
+   * @param regionDirList region dir list
+   * @return a set of keys to store the boundaries
+   */
+  public byte[][] generateBoundaryKeys(ArrayList<Path> regionDirList)
+      throws FileNotFoundException, IOException {
+    TreeMap<byte[], Integer> map = new TreeMap<byte[], Integer>(Bytes.BYTES_COMPARATOR);
+    // Build a set of keys to store the boundaries
+    byte[][] keys = null;
+    // calculate region boundaries and add all the column families to the table descriptor
+    for (Path regionDir : regionDirList) {
+      LOG.debug("Parsing region dir: " + regionDir);
+      Path hfofDir = regionDir;
+
+      if (!fs.exists(hfofDir)) {
+        LOG.warn("HFileOutputFormat dir " + hfofDir + " not found");
+      }
+
+      FileStatus[] familyDirStatuses = fs.listStatus(hfofDir);
+      if (familyDirStatuses == null) {
+        throw new IOException("No families found in " + hfofDir);
+      }
+
+      for (FileStatus stat : familyDirStatuses) {
+        if (!stat.isDirectory()) {
+          LOG.warn("Skipping non-directory " + stat.getPath());
+          continue;
+        }
+        boolean isIgnore = false;
+        String pathName = stat.getPath().getName();
+        for (String ignore : ignoreDirs) {
+          if (pathName.contains(ignore)) {
+            LOG.warn("Skipping non-family directory" + pathName);
+            isIgnore = true;
+            break;
+          }
+        }
+        if (isIgnore) {
+          continue;
+        }
+        Path familyDir = stat.getPath();
+        LOG.debug("Parsing family dir [" + familyDir.toString() + " in region [" + regionDir + "]");
+        // Skip _logs, etc
+        if (familyDir.getName().startsWith("_") || familyDir.getName().startsWith(".")) {
+          continue;
+        }
+
+        // start to parse hfile inside one family dir
+        Path[] hfiles = FileUtil.stat2Paths(fs.listStatus(familyDir));
+        for (Path hfile : hfiles) {
+          if (hfile.getName().startsWith("_") || hfile.getName().startsWith(".")
+              || StoreFileInfo.isReference(hfile.getName())
+              || HFileLink.isHFileLink(hfile.getName())) {
+            continue;
+          }
+          HFile.Reader reader = HFile.createReader(fs, hfile, new CacheConfig(conf), conf);
+          final byte[] first, last;
+          try {
+            reader.loadFileInfo();
+            first = reader.getFirstRowKey();
+            last = reader.getLastRowKey();
+            LOG.debug("Trying to figure out region boundaries hfile=" + hfile + " first="
+                + Bytes.toStringBinary(first) + " last=" + Bytes.toStringBinary(last));
+
+            // To eventually infer start key-end key boundaries
+            Integer value = map.containsKey(first) ? (Integer) map.get(first) : 0;
+            map.put(first, value + 1);
+            value = map.containsKey(last) ? (Integer) map.get(last) : 0;
+            map.put(last, value - 1);
+          } finally {
+            reader.close();
+          }
+        }
+      }
+    }
+    keys = LoadIncrementalHFiles.inferBoundaries(map);
+    return keys;
+  }
+
+  /**
+   * Check that the backup image path is valid and that a manifest file exists in the path.
+   * @param backupManifestMap If all the manifests are found, then they are put into this map
+   * @param tableArray the tables involved
+   * @throws IOException exception
+   */
+  public static void checkImageManifestExist(HashMap<TableName, BackupManifest> backupManifestMap,
+      TableName[] tableArray, Configuration conf,
+      Path backupRootPath, String backupId) throws IOException {
+    for (TableName tableName : tableArray) {
+      BackupManifest manifest = getManifest(tableName, conf, backupRootPath, backupId);
+      backupManifestMap.put(tableName, manifest);
+    }
+  }
+}
\ No newline at end of file
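
For illustration only (not part of this patch): a sketch of the backup image layout produced by the static path helpers above, using the example values from the javadoc.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.HBackupFileSystem;

public class BackupLayoutExample {
  public static void main(String[] args) {
    String root = "hdfs://backup.hbase.org:9000/user/biadmin/backup1";
    String backupId = "backup_1396650096738";
    TableName table = TableName.valueOf("t1_dn");
    // Table image dir: .../backup1/default/t1_dn/backup_1396650096738
    System.out.println(HBackupFileSystem.getTableBackupDir(root, backupId, table));
    // Incremental backup WAL dir: .../backup1/WALs/backup_1396650096738
    System.out.println(HBackupFileSystem.getLogBackupDir(root, backupId));
  }
}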

http://git-wip-us.apache.org/repos/asf/hbase/blob/ab491d4a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreClient.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreClient.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreClient.java
new file mode 100644
index 0000000..a3aaa98
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreClient.java
@@ -0,0 +1,46 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.backup;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.TableName;
+
+public interface RestoreClient {
+
+  public void setConf(Configuration conf);
+
+  /**
+   * Restore operation.
+   * @param backupRootDir The root dir for backup image
+   * @param backupId The backup id for image to be restored
+   * @param check True if only do dependency check
+   * @param autoRestore True if automatically restore following the dependency
+   * @param sTableArray The array of tables to be restored
+   * @param tTableArray The array of mapping tables to restore to
+   * @param isOverwrite True then do restore overwrite if target table exists, otherwise fail the
+   *          request if target table exists
+   * @return True if only do dependency check
+   * @throws IOException if any failure during restore
+   */
+  public  boolean restore(
+      String backupRootDir,
+      String backupId, boolean check, boolean autoRestore, TableName[] sTableArray,
+      TableName[] tTableArray, boolean isOverwrite) throws IOException;
+}
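
For illustration only (not part of this patch): a minimal sketch of a restore request through this interface, assuming the BackupRestoreFactory added elsewhere in this patch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.BackupRestoreFactory;
import org.apache.hadoop.hbase.backup.RestoreClient;

public class RestoreClientUsage {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    RestoreClient client = BackupRestoreFactory.getRestoreClient(conf);
    TableName[] from = new TableName[] { TableName.valueOf("t1_dn") };
    TableName[] to = new TableName[] { TableName.valueOf("t1_dn_restored") };
    // check=false: actually run the restore; autoRestore=true: restore dependent
    // images first; overwrite=false: fail if the target table already exists.
    client.restore("hdfs://backup.hbase.org:9000/user/biadmin/backup1",
        "backup_1396650096738", false, true, from, to, false);
  }
}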

http://git-wip-us.apache.org/repos/asf/hbase/blob/ab491d4a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java
new file mode 100644
index 0000000..541882a
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java
@@ -0,0 +1,174 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.backup;
+
+import java.io.IOException;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
+import org.apache.commons.cli.PosixParser;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.impl.BackupUtil;
+import org.apache.hadoop.hbase.util.AbstractHBaseTool;
+import org.apache.hadoop.hbase.util.LogUtils;
+import org.apache.hadoop.util.ToolRunner;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+
+public class RestoreDriver extends AbstractHBaseTool {
+
+  private static final Log LOG = LogFactory.getLog(RestoreDriver.class);
+  private Options opt;
+  private CommandLine cmd;
+
+  private static final String OPTION_OVERWRITE = "overwrite";
+  private static final String OPTION_CHECK = "check";
+  private static final String OPTION_AUTOMATIC = "automatic";
+
+  private static final String USAGE =
+      "Usage: hbase restore <backup_root_path> <backup_id> <tables> [tableMapping] \n"
+          + "       [-overwrite] [-check] [-automatic]\n"
+          + " backup_root_path  The parent location where the backup images are stored\n"
+          + " backup_id         The id identifying the backup image\n"
+          + " table(s)          Table(s) from the backup image to be restored.\n"
+          + "                   Tables are separated by comma.\n"
+          + " Options:\n"
+          + "   tableMapping    A comma separated list of target tables.\n"
+          + "                   If specified, each table in <tables> must have a mapping.\n"
+          + "   -overwrite      With this option, restore overwrites to the existing table "
+          + "if there's any in\n"
+          + "                   restore target. The existing table must be online before restore.\n"
+          + "   -check          With this option, restore sequence and dependencies are checked\n"
+          + "                   and verified without executing the restore\n"
+          + "   -automatic      With this option, all the dependencies are automatically restored\n"
+          + "                   together with this backup image following the correct order.\n"
+          + "                   The restore dependencies can be checked by using \"-check\" "
+          + "option,\n"
+          + "                   or using \"hbase backup describe\" command. Without this option, "
+          + "only\n" + "                   this backup image is restored\n";
+
+  protected void init() throws IOException {
+    // define supported options
+    opt = new Options();
+    opt.addOption(OPTION_OVERWRITE, false,
+        "Overwrite the data if any of the restore target tables exists");
+    opt.addOption(OPTION_CHECK, false, "Check restore sequence and dependencies");
+    opt.addOption(OPTION_AUTOMATIC, false, "Restore all dependencies");
+    opt.addOption("debug", false, "Enable debug logging");
+
+    // disable irrelevant loggers so they do not clutter command output
+    LogUtils.disableUselessLoggers(LOG);
+  }
+
+  private int parseAndRun(String[] args) {
+    CommandLine cmd = null;
+    try {
+      cmd = new PosixParser().parse(opt, args);
+    } catch (ParseException e) {
+      LOG.error("Could not parse command", e);
+      return -1;
+    }
+
+    // enable debug logging
+    Logger backupClientLogger = Logger.getLogger("org.apache.hadoop.hbase.backup");
+    if (cmd.hasOption("debug")) {
+      backupClientLogger.setLevel(Level.DEBUG);
+    }
+
+    // whether to overwrite to existing table if any, false by default
+    boolean isOverwrite = cmd.hasOption(OPTION_OVERWRITE);
+    if (isOverwrite) {
+      LOG.debug("Found -overwrite option in restore command, "
+          + "will overwrite to existing table if any in the restore target");
+    }
+
+    // whether to only check the dependencies, false by default
+    boolean check = cmd.hasOption(OPTION_CHECK);
+    if (check) {
+      LOG.debug("Found -check option in restore command, "
+          + "will check and verify the dependencies");
+    }
+
+    // whether to restore all dependencies, false by default
+    boolean autoRestore = cmd.hasOption(OPTION_AUTOMATIC);
+    if (autoRestore) {
+      LOG.debug("Found -automatic option in restore command, "
+          + "will automatically retore all the dependencies");
+    }
+
+    // parse main restore command options
+    String[] remainArgs = cmd.getArgs();
+    if (remainArgs.length < 3) {
+      System.out.println("ERROR: missing arguments");
+      System.out.println(USAGE);
+      return -1;
+    }
+
+    String backupRootDir = remainArgs[0];
+    String backupId = remainArgs[1];
+    String tables = remainArgs[2];
+
+    String tableMapping = (remainArgs.length > 3) ? remainArgs[3] : null;
+
+    TableName[] sTableArray = BackupUtil.parseTableNames(tables);
+    TableName[] tTableArray = BackupUtil.parseTableNames(tableMapping);
+
+    if (sTableArray != null && tTableArray != null && (sTableArray.length != tTableArray.length)) {
+      System.err.println("ERROR: table mapping mismatch: " + tables + " : " + tableMapping);
+      System.out.println(USAGE);
+      return -1;
+    }
+
+    try {
+      RestoreClient client = BackupRestoreFactory.getRestoreClient(conf);
+      client.restore(backupRootDir, backupId, check, autoRestore, sTableArray,
+        tTableArray, isOverwrite);
+    } catch (IOException e) {
+      System.err.println("ERROR: " + e.getMessage());
+      return -1;
+    }
+    return 0;
+  }
+
+  @Override
+  protected void addOptions() {
+  }
+
+  @Override
+  protected void processOptions(CommandLine cmd) {
+    this.cmd = cmd;
+  }
+
+  @Override
+  protected int doWork() throws Exception {
+    init();
+    return parseAndRun(cmd.getArgs());
+  }
+
+  public static void main(String[] args) throws Exception {
+    Configuration conf = HBaseConfiguration.create();
+    int ret = ToolRunner.run(conf, new RestoreDriver(), args);
+    System.exit(ret);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/ab491d4a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupClientImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupClientImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupClientImpl.java
new file mode 100644
index 0000000..5b8a151
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupClientImpl.java
@@ -0,0 +1,183 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup.impl;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.BackupClient;
+import org.apache.hadoop.hbase.backup.BackupType;
+import org.apache.hadoop.hbase.backup.BackupUtility;
+import org.apache.hadoop.hbase.backup.HBackupFileSystem;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+
+import com.google.common.collect.Lists;
+
+/**
+ * Backup HBase tables locally or on a remote cluster. Serves as the client entry point for the
+ * following features: full backup (local and remote backup/restore for a list of tables);
+ * incremental backup (built on top of a full backup, e.g. as daily/weekly backups); converting
+ * incremental backup WAL files into HFiles; merging several backup images into one (e.g. weekly
+ * into monthly); adding and removing tables to and from a backup image; cancelling a backup
+ * process; and describing the information of a backup image.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public final class BackupClientImpl implements BackupClient {
+  private static final Log LOG = LogFactory.getLog(BackupClientImpl.class);
+  private Configuration conf;
+  private BackupManager backupManager;
+
+  public BackupClientImpl() {
+  }
+
+  @Override
+  public void setConf(Configuration conf) {
+    this.conf = conf;
+  }
+
+  /**
+   * Prepare and submit Backup request
+   * @param backupId : backup_timestamp (something like backup_1398729212626)
+   * @param backupType : full or incremental
+   * @param tableList : tables to be backed up
+   * @param targetRootDir : specified by user
+   * @throws IOException exception
+   */
+  protected void requestBackup(String backupId, BackupType backupType, List<TableName> tableList,
+      String targetRootDir) throws IOException {
+
+    BackupContext backupContext = null;
+
+    HBaseAdmin hbadmin = null;
+    Connection conn = null;
+    try {
+      backupManager = new BackupManager(conf);
+      if (backupType == BackupType.INCREMENTAL) {
+        Set<TableName> incrTableSet = backupManager.getIncrementalBackupTableSet();
+        if (incrTableSet.isEmpty()) {
+          LOG.warn("Incremental backup table set contains no table.\n"
+              + "Use 'backup create full' or 'backup stop' to \n "
+              + "change the tables covered by incremental backup.");
+          throw new DoNotRetryIOException("No table covered by incremental backup.");
+        }
+
+        LOG.info("Incremental backup for the following table set: " + incrTableSet);
+        tableList = Lists.newArrayList(incrTableSet);
+      }
+
+      // check whether table exists first before starting real request
+      if (tableList != null) {
+        ArrayList<TableName> nonExistingTableList = null;
+        conn = ConnectionFactory.createConnection(conf);
+        hbadmin = (HBaseAdmin) conn.getAdmin();
+        for (TableName tableName : tableList) {
+          if (!hbadmin.tableExists(tableName)) {
+            if (nonExistingTableList == null) {
+              nonExistingTableList = new ArrayList<>();
+            }
+            nonExistingTableList.add(tableName);
+          }
+        }
+        if (nonExistingTableList != null) {
+          if (backupType == BackupType.INCREMENTAL ) {
+            LOG.warn("Incremental backup table set contains non-exising table: "
+                + nonExistingTableList);
+          } else {
+            // Throw exception only in full mode - we tried to back up a non-existing table
+            throw new DoNotRetryIOException("Non-existing tables found in the table list: "
+                + nonExistingTableList);
+          }
+        }
+      }
+
+      // if any target table backup dir already exists, then no backup action is taken
+      if (tableList != null) {
+        for (TableName table : tableList) {
+          String targetTableBackupDir =
+              HBackupFileSystem.getTableBackupDir(targetRootDir, backupId, table);
+          Path targetTableBackupDirPath = new Path(targetTableBackupDir);
+          FileSystem outputFs = FileSystem.get(targetTableBackupDirPath.toUri(), conf);
+          if (outputFs.exists(targetTableBackupDirPath)) {
+            throw new DoNotRetryIOException("Target backup directory " + targetTableBackupDir
+              + " exists already.");
+          }
+        }
+      }
+      backupContext =
+          backupManager.createBackupContext(backupId, backupType, tableList, targetRootDir);
+      backupManager.initialize();
+      backupManager.dispatchRequest(backupContext);
+    } catch (BackupException e) {
+      // suppress the backup exception wrapped within #initialize or #dispatchRequest, backup
+      // exception has already been handled normally
+      LOG.error("Backup Exception ", e);
+    } finally {
+      if (hbadmin != null) {
+        hbadmin.close();
+      }
+      if (conn != null) {
+        conn.close();
+      }
+    }
+  }
+
+  @Override
+  public String create(BackupType backupType, List<TableName> tableList, String backupRootPath)
+      throws IOException {
+
+    String backupId = BackupRestoreConstants.BACKUPID_PREFIX + EnvironmentEdgeManager.currentTime();
+    BackupUtility.checkTargetDir(backupRootPath, conf);
+
+    // table list specified for backup, trigger backup on specified tables
+    try {
+      requestBackup(backupId, backupType, tableList, backupRootPath);
+    } catch (RuntimeException e) {
+      String errMsg = e.getMessage();
+      if (errMsg != null
+          && (errMsg.startsWith("Non-existing tables found") || errMsg
+              .startsWith("Snapshot is not found"))) {
+        LOG.error(errMsg + ", please check your command");
+        throw e;
+      } else {
+        throw e;
+      }
+    } finally {
+      if (backupManager != null) {
+        backupManager.close();
+      }
+    }
+    return backupId;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/ab491d4a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
new file mode 100644
index 0000000..56e26fa
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
@@ -0,0 +1,158 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup.impl;
+
+import java.io.IOException;
+import org.apache.commons.cli.CommandLine;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.hbase.backup.BackupClient;
+import org.apache.hadoop.hbase.backup.BackupRestoreFactory;
+import org.apache.hadoop.hbase.backup.BackupType;
+import org.apache.hadoop.hbase.backup.impl.BackupRestoreConstants.BackupCommand;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
+import com.google.common.collect.Lists;
+
+/**
+ * General backup commands, options and usage messages
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public final class BackupCommands {
+
+  private static final String USAGE = "Usage: hbase backup COMMAND\n"
+      + "where COMMAND is one of:\n" + "  create     create a new backup image\n"
+      + "Enter \'help COMMAND\' to see help message for each command\n";
+
+  private static final String CREATE_CMD_USAGE =
+      "Usage: hbase backup create <type> <backup_root_path> [tables] [-convert] "
+          + "\n" + " type          \"full\" to create a full backup image;\n"
+          + "               \"incremental\" to create an incremental backup image\n"
+          + " backup_root_path   The full root path to store the backup image,\n"
+          + "                    the prefix can be hdfs, webhdfs, gpfs, etc\n" + " Options:\n"
+          + "   tables      If no tables (\"\") are specified, all tables are backed up. "
+          + "Otherwise it is a\n" + "               comma separated list of tables.\n"
+          + "   -convert    For an incremental backup, convert WAL files to HFiles\n";
+
+  public static abstract class Command extends Configured {
+    Command(Configuration conf) {
+      super(conf);
+    }
+    public abstract void execute() throws IOException;
+  }
+
+  private BackupCommands() {
+    throw new AssertionError("Instantiating utility class...");
+  }
+
+  public static Command createCommand(Configuration conf, BackupCommand type, CommandLine cmdline) {
+    Command cmd = null;
+    switch (type) {
+      case CREATE:
+        cmd = new CreateCommand(conf, cmdline);
+        break;
+      case HELP:
+      default:
+        cmd = new HelpCommand(conf, cmdline);
+        break;
+    }
+    return cmd;
+  }
+
+  private static class CreateCommand extends Command {
+    CommandLine cmdline;
+
+    CreateCommand(Configuration conf, CommandLine cmdline) {
+      super(conf);
+      this.cmdline = cmdline;
+    }
+
+    @Override
+    public void execute() throws IOException {
+      if (cmdline == null || cmdline.getArgs() == null) {
+        System.out.println("ERROR: missing arguments");
+        System.out.println(CREATE_CMD_USAGE);
+        System.exit(-1);
+      }
+      String[] args = cmdline.getArgs();
+      if (args.length < 2 || args.length > 3) {
+        System.out.println("ERROR: wrong number of arguments");
+        System.out.println(CREATE_CMD_USAGE);
+        System.exit(-1);
+      }
+
+      if (!BackupType.FULL.toString().equalsIgnoreCase(args[0])
+          && !BackupType.INCREMENTAL.toString().equalsIgnoreCase(args[0])) {
+        System.out.println("ERROR: invalid backup type");
+        System.out.println(CREATE_CMD_USAGE);
+        System.exit(-1);
+      }
+
+      String tables = (args.length == 3) ? args[2] : null;
+
+      try {
+        BackupClient client = BackupRestoreFactory.getBackupClient(getConf());
+        client.create(BackupType.valueOf(args[0].toUpperCase()),
+          Lists.newArrayList(BackupUtil.parseTableNames(tables)), args[1]);
+      } catch (RuntimeException e) {
+        System.out.println("ERROR: " + e.getMessage());
+        System.exit(-1);
+      }
+    }
+  }
+
+  private static class HelpCommand extends Command {
+    CommandLine cmdline;
+
+    HelpCommand(Configuration conf, CommandLine cmdline) {
+      super(conf);
+      this.cmdline = cmdline;
+    }
+
+    @Override
+    public void execute() throws IOException {
+      if (cmdline == null) {
+        System.out.println(USAGE);
+        System.exit(0);
+      }
+
+      String[] args = cmdline.getArgs();
+      if (args == null || args.length == 0) {
+        System.out.println(USAGE);
+        System.exit(0);
+      }
+
+      if (args.length != 1) {
+        System.out.println("Only support check help message of a single command type");
+        System.out.println(USAGE);
+        System.exit(0);
+      }
+
+      String type = args[0];
+
+      if (BackupCommand.CREATE.name().equalsIgnoreCase(type)) {
+        System.out.println(CREATE_CMD_USAGE);
+      } // other commands will be supported in a future JIRA
+      System.exit(0);
+    }
+  }
+
+}
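
A minimal sketch of how a driver could dispatch to these commands (not part of the commit); the class name BackupCliSketch, the commons-cli wiring, and the argument values are illustrative assumptions only.

    import org.apache.commons.cli.CommandLine;
    import org.apache.commons.cli.Options;
    import org.apache.commons.cli.PosixParser;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.backup.impl.BackupCommands;
    import org.apache.hadoop.hbase.backup.impl.BackupRestoreConstants.BackupCommand;

    // Hypothetical driver: no options are declared, so all tokens land in cmdline.getArgs().
    public class BackupCliSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        CommandLine cmdline = new PosixParser().parse(new Options(),
            new String[] { "full", "hdfs://host:8020/backup", "t1,t2" });
        BackupCommands.Command cmd =
            BackupCommands.createCommand(conf, BackupCommand.CREATE, cmdline);
        cmd.execute(); // resolves a BackupClient and requests a FULL backup of t1 and t2
      }
    }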

http://git-wip-us.apache.org/repos/asf/hbase/blob/ab491d4a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupContext.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupContext.java
new file mode 100644
index 0000000..1be0c3b
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupContext.java
@@ -0,0 +1,382 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup.impl;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.BackupType;
+import org.apache.hadoop.hbase.backup.HBackupFileSystem;
+import org.apache.hadoop.hbase.backup.impl.BackupHandler.BackupState;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.BackupProtos;
+import org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.Builder;
+import org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus;
+
+/**
+ * An object to encapsulate the information for each backup request
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class BackupContext {
+
+  public Map<TableName, BackupStatus> getBackupStatusMap() {
+    return backupStatusMap;
+  }
+
+  public void setBackupStatusMap(Map<TableName, BackupStatus> backupStatusMap) {
+    this.backupStatusMap = backupStatusMap;
+  }
+
+  public HashMap<TableName, HashMap<String, Long>> getTableSetTimestampMap() {
+    return tableSetTimestampMap;
+  }
+
+  public void setTableSetTimestampMap(
+      HashMap<TableName, HashMap<String, Long>> tableSetTimestampMap) {
+    this.tableSetTimestampMap = tableSetTimestampMap;
+  }
+
+  public String getHlogTargetDir() {
+    return hlogTargetDir;
+  }
+
+  public void setType(BackupType type) {
+    this.type = type;
+  }
+
+  public void setTargetRootDir(String targetRootDir) {
+    this.targetRootDir = targetRootDir;
+  }
+
+  public void setTotalBytesCopied(long totalBytesCopied) {
+    this.totalBytesCopied = totalBytesCopied;
+  }
+
+  public void setCancelled(boolean cancelled) {
+    this.state = BackupState.CANCELLED;
+  }
+
+  // backup id: a timestamp when we request the backup
+  private String backupId;
+
+  // backup type, full or incremental
+  private BackupType type;
+
+  // target root directory for storing the backup files
+  private String targetRootDir;
+
+  // overall backup state
+  private BackupHandler.BackupState state;
+
+  // overall backup phase
+  private BackupHandler.BackupPhase phase;
+
+  // overall backup failure message
+  private String failedMsg;
+
+  // backup status map for all tables
+  private Map<TableName, BackupStatus> backupStatusMap;
+
+  // actual start timestamp of the backup process
+  private long startTs;
+
+  // actual end timestamp of the backup process, whether it failed or completed
+  private long endTs;
+
+  // the total bytes of incremental logs copied
+  private long totalBytesCopied;
+
+  // for incremental backup, the location of the backed-up hlogs
+  private String hlogTargetDir = null;
+
+  // incremental backup file list
+  transient private List<String> incrBackupFileList;
+
+  // new region server log timestamps for table set after distributed log roll
+  // key - table name, value - map of RegionServer hostname -> last log rolled timestamp
+  transient private HashMap<TableName, HashMap<String, Long>> tableSetTimestampMap;
+
+  // backup progress in percent (0-100)
+
+  private int progress;
+
+  public BackupContext() {
+  }
+
+  public BackupContext(String backupId, BackupType type, TableName[] tables, String targetRootDir) {
+    backupStatusMap = new HashMap<TableName, BackupStatus>();
+
+    this.backupId = backupId;
+    this.type = type;
+    this.targetRootDir = targetRootDir;
+
+    this.addTables(tables);
+
+    if (type == BackupType.INCREMENTAL) {
+      setHlogTargetDir(HBackupFileSystem.getLogBackupDir(targetRootDir, backupId));
+    }
+
+    this.startTs = 0;
+    this.endTs = 0;
+  }
+
+  /**
+   * Set current progress
+   * @param p progress in percent (0-100)
+   */
+
+  public void setProgress(int p) {
+    this.progress = p;
+  }
+
+  /**
+   * Get current progress
+   */
+  public int getProgress() {
+    return progress;
+  }
+
+
+  /**
+   * Whether this backup has been marked as cancelled.
+   * @return True if marked as cancelled
+   */
+  public boolean isCancelled() {
+    return this.state == BackupState.CANCELLED;
+  }
+
+  public String getBackupId() {
+    return backupId;
+  }
+
+  public void setBackupId(String backupId) {
+    this.backupId = backupId;
+  }
+
+  public BackupStatus getBackupStatus(TableName table) {
+    return this.backupStatusMap.get(table);
+  }
+
+  public String getFailedMsg() {
+    return failedMsg;
+  }
+
+  public void setFailedMsg(String failedMsg) {
+    this.failedMsg = failedMsg;
+  }
+
+  public long getStartTs() {
+    return startTs;
+  }
+
+  public void setStartTs(long startTs) {
+    this.startTs = startTs;
+  }
+
+  public long getEndTs() {
+    return endTs;
+  }
+
+  public void setEndTs(long endTs) {
+    this.endTs = endTs;
+  }
+
+  public long getTotalBytesCopied() {
+    return totalBytesCopied;
+  }
+
+  public BackupHandler.BackupState getState() {
+    return state;
+  }
+
+  public void setState(BackupHandler.BackupState flag) {
+    this.state = flag;
+  }
+
+  public BackupHandler.BackupPhase getPhase() {
+    return phase;
+  }
+
+  public void setPhase(BackupHandler.BackupPhase phase) {
+    this.phase = phase;
+  }
+
+  public BackupType getType() {
+    return type;
+  }
+
+  public void setSnapshotName(TableName table, String snapshotName) {
+    this.backupStatusMap.get(table).setSnapshotName(snapshotName);
+  }
+
+  public String getSnapshotName(TableName table) {
+    return this.backupStatusMap.get(table).getSnapshotName();
+  }
+
+  public List<String> getSnapshotNames() {
+    List<String> snapshotNames = new ArrayList<String>();
+    for (BackupStatus backupStatus : this.backupStatusMap.values()) {
+      snapshotNames.add(backupStatus.getSnapshotName());
+    }
+    return snapshotNames;
+  }
+
+  public Set<TableName> getTables() {
+    return this.backupStatusMap.keySet();
+  }
+
+  public List<TableName> getTableNames() {
+    return new ArrayList<TableName>(backupStatusMap.keySet());
+  }
+
+  public void addTables(TableName[] tables) {
+    for (TableName table : tables) {
+      BackupStatus backupStatus = new BackupStatus(table, this.targetRootDir, this.backupId);
+      this.backupStatusMap.put(table, backupStatus);
+    }
+  }
+
+  public String getTargetRootDir() {
+    return targetRootDir;
+  }
+
+  public void setHlogTargetDir(String hlogTargetDir) {
+    this.hlogTargetDir = hlogTargetDir;
+  }
+
+  public String getHLogTargetDir() {
+    return hlogTargetDir;
+  }
+
+  public List<String> getIncrBackupFileList() {
+    return incrBackupFileList;
+  }
+
+  public List<String> setIncrBackupFileList(List<String> incrBackupFileList) {
+    this.incrBackupFileList = incrBackupFileList;
+    return this.incrBackupFileList;
+  }
+
+  /**
+   * Set the new region server log timestamps after distributed log roll
+   * @param newTableSetTimestampMap table timestamp map
+   */
+  public void setIncrTimestampMap(HashMap<TableName,
+      HashMap<String, Long>> newTableSetTimestampMap) {
+    this.tableSetTimestampMap = newTableSetTimestampMap;
+  }
+
+  /**
+   * Get new region server log timestamps after distributed log roll
+   * @return new region server log timestamps
+   */
+  public HashMap<TableName, HashMap<String, Long>> getIncrTimestampMap() {
+    return this.tableSetTimestampMap;
+  }
+
+  public TableName getTableBySnapshot(String snapshotName) {
+    for (Entry<TableName, BackupStatus> entry : this.backupStatusMap.entrySet()) {
+      if (snapshotName.equals(entry.getValue().getSnapshotName())) {
+        return entry.getKey();
+      }
+    }
+    return null;
+  }
+
+  public byte[] toByteArray() throws IOException {
+    BackupProtos.BackupContext.Builder builder =
+        BackupProtos.BackupContext.newBuilder();
+    builder.setBackupId(getBackupId());
+    setBackupStatusMap(builder);
+    builder.setEndTs(getEndTs());
+    if(getFailedMsg() != null){
+      builder.setFailedMessage(getFailedMsg());
+    }
+    if(getState() != null){
+      builder.setState(BackupProtos.BackupContext.BackupState.valueOf(getState().name()));
+    }
+    if(getPhase() != null){
+      builder.setPhase(BackupProtos.BackupContext.BackupPhase.valueOf(getPhase().name()));
+    }
+    if(getHLogTargetDir() != null){
+      builder.setHlogTargetDir(getHLogTargetDir());
+    }
+
+    builder.setProgress(getProgress());
+    builder.setStartTs(getStartTs());
+    builder.setTargetRootDir(getTargetRootDir());
+    builder.setTotalBytesCopied(getTotalBytesCopied());
+    builder.setType(BackupProtos.BackupType.valueOf(getType().name()));
+    byte[] data = builder.build().toByteArray();
+    return data;
+  }
+
+  private void setBackupStatusMap(Builder builder) {
+    for (Entry<TableName, BackupStatus> entry: backupStatusMap.entrySet()) {
+      builder.addTableBackupStatus(entry.getValue().toProto());
+    }
+  }
+
+  public static BackupContext fromByteArray(byte[] data) throws IOException {
+
+    BackupContext context = new BackupContext();
+    BackupProtos.BackupContext proto = BackupProtos.BackupContext.parseFrom(data);
+    context.setBackupId(proto.getBackupId());
+    context.setBackupStatusMap(toMap(proto.getTableBackupStatusList()));
+    context.setEndTs(proto.getEndTs());
+    if(proto.hasFailedMessage()) {
+      context.setFailedMsg(proto.getFailedMessage());
+    }
+    if(proto.hasState()) {
+      context.setState(BackupHandler.BackupState.valueOf(proto.getState().name()));
+    }
+    if(proto.hasHlogTargetDir()) {
+      context.setHlogTargetDir(proto.getHlogTargetDir());
+    }
+    if(proto.hasPhase()) {
+      context.setPhase(BackupHandler.BackupPhase.valueOf(proto.getPhase().name()));
+    }
+    if(proto.hasProgress()) {
+      context.setProgress(proto.getProgress());
+    }
+    context.setStartTs(proto.getStartTs());
+    context.setTargetRootDir(proto.getTargetRootDir());
+    context.setTotalBytesCopied(proto.getTotalBytesCopied());
+    context.setType(BackupType.valueOf(proto.getType().name()));
+    return context;
+  }
+
+  private static Map<TableName, BackupStatus> toMap(List<TableBackupStatus> list) {
+    HashMap<TableName, BackupStatus> map = new HashMap<>();
+    for (TableBackupStatus tbs : list){
+      map.put(ProtobufUtil.toTableName(tbs.getTable()), BackupStatus.convert(tbs));
+    }
+    return map;
+  }
+
+}
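
A small sketch of the protobuf round trip this class provides (not part of the commit); the backup id, table name, and root path below are example values, and BackupHandler.BackupState.RUNNING is assumed to mirror the proto enum of the same name.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.backup.BackupType;
    import org.apache.hadoop.hbase.backup.impl.BackupContext;
    import org.apache.hadoop.hbase.backup.impl.BackupHandler;

    public class BackupContextRoundTrip {
      public static void main(String[] args) throws Exception {
        TableName[] tables = new TableName[] { TableName.valueOf("t1") };
        // Example values; a real backup id is generated when the backup is requested.
        BackupContext ctx = new BackupContext("backup_1458612345000", BackupType.FULL,
            tables, "hdfs://host:8020/backup");
        ctx.setState(BackupHandler.BackupState.RUNNING);
        byte[] bytes = ctx.toByteArray();            // serialize via BackupProtos.BackupContext
        BackupContext restored = BackupContext.fromByteArray(bytes);
        assert restored.getBackupId().equals(ctx.getBackupId());
        assert restored.getState() == BackupHandler.BackupState.RUNNING;
      }
    }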

http://git-wip-us.apache.org/repos/asf/hbase/blob/ab491d4a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCopyService.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCopyService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCopyService.java
new file mode 100644
index 0000000..1e8da63
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCopyService.java
@@ -0,0 +1,37 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup.impl;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public interface BackupCopyService extends Configurable {
+  enum Type {
+    FULL, INCREMENTAL
+  }
+
+  public int copy(BackupContext backupContext, BackupManager backupManager, Configuration conf,
+      BackupCopyService.Type copyType, String[] options) throws IOException;
+}
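
A bare-bones implementation sketch of this interface (not part of the commit); the class name is hypothetical, and a real service would run an actual copy job and return its exit code.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.backup.impl.BackupContext;
    import org.apache.hadoop.hbase.backup.impl.BackupCopyService;
    import org.apache.hadoop.hbase.backup.impl.BackupManager;

    // Hypothetical no-op service; Configurable obliges us to carry a Configuration.
    public class NoOpBackupCopyService implements BackupCopyService {
      private Configuration conf;

      @Override
      public void setConf(Configuration conf) {
        this.conf = conf;
      }

      @Override
      public Configuration getConf() {
        return conf;
      }

      @Override
      public int copy(BackupContext backupContext, BackupManager backupManager, Configuration conf,
          BackupCopyService.Type copyType, String[] options) throws IOException {
        // options would carry copy-specific arguments (e.g. source and target paths); ignored here.
        return 0; // assuming 0 means success, as with a job exit code
      }
    }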

http://git-wip-us.apache.org/repos/asf/hbase/blob/ab491d4a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupException.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupException.java
new file mode 100644
index 0000000..af70cc8
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupException.java
@@ -0,0 +1,85 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup.impl;
+
+import org.apache.hadoop.hbase.HBaseIOException;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
+/**
+ * Backup exception
+ */
+@SuppressWarnings("serial")
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class BackupException extends HBaseIOException {
+  private BackupContext description;
+
+  /**
+   * An exception occurred for a backup, where the specific backup it relates to is not known
+   * @param msg Full description of the failure
+   */
+  public BackupException(String msg) {
+    super(msg);
+  }
+
+  /**
+   * An exception occurred for a backup, with an underlying cause
+   * @param cause the cause
+   */
+  public BackupException(Throwable cause) {
+    super(cause);
+  }
+
+  /**
+   * Exception for the given backup that has no previous root cause
+   * @param msg reason why the backup failed
+   * @param desc description of the backup that failed
+   */
+  public BackupException(String msg, BackupContext desc) {
+    super(msg);
+    this.description = desc;
+  }
+
+  /**
+   * Exception for the given backup due to another exception
+   * @param msg reason why the backup failed
+   * @param cause root cause of the failure
+   * @param desc description of the backup that failed
+   */
+  public BackupException(String msg, Throwable cause, BackupContext desc) {
+    super(msg, cause);
+    this.description = desc;
+  }
+
+  /**
+   * Exception when the description of the backup cannot be determined, due to some other root
+   * cause
+   * @param message description of what caused the failure
+   * @param e root cause
+   */
+  public BackupException(String message, Exception e) {
+    super(message, e);
+  }
+
+  public BackupContext getBackupContext() {
+    return this.description;
+  }
+
+}
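
A small usage sketch of these constructors (not part of the commit); the helper method and the failing snapshot step are hypothetical.

    import java.io.IOException;
    import org.apache.hadoop.hbase.backup.impl.BackupContext;
    import org.apache.hadoop.hbase.backup.impl.BackupException;

    public class BackupExceptionSketch {
      // Hypothetical helper: wrap a low-level failure so callers can still get at the BackupContext.
      static void snapshotStep(BackupContext ctx) throws BackupException {
        try {
          throw new IOException("snapshot RPC timed out"); // stand-in for a real snapshot call
        } catch (IOException ioe) {
          throw new BackupException("snapshot failed for backup " + ctx.getBackupId(), ioe, ctx);
        }
      }
    }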

