hbase-commits mailing list archives

From: te...@apache.org
Subject: [3/6] hbase git commit: HBASE-14140 HBase Backup/Restore Phase 3: Enhance HBaseAdmin API to include backup/restore - related API (Vladimir)
Date: Wed, 25 May 2016 20:18:49 GMT
http://git-wip-us.apache.org/repos/asf/hbase/blob/82735499/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
deleted file mode 100644
index 8c3c2be..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
+++ /dev/null
@@ -1,802 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.backup.impl;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.TreeSet;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.BackupInfo;
-import org.apache.hadoop.hbase.backup.BackupInfo.BackupState;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.protobuf.generated.BackupProtos;
-
-/**
- * This class provides the 'hbase:backup' table API
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public final class BackupSystemTable implements Closeable {
-  
-  static class WALItem {
-    String backupId;
-    String walFile;
-    String backupRoot;
-    
-    WALItem(String backupId, String walFile, String backupRoot) {
-      this.backupId = backupId;
-      this.walFile = walFile;
-      this.backupRoot = backupRoot;
-    }
-
-    public String getBackupId() {
-      return backupId;
-    }
-
-    public String getWalFile() {
-      return walFile;
-    }
-
-    public String getBackupRoot() {
-      return backupRoot;
-    }
-
-    @Override
-    public String toString() {
-      return backupRoot + "/" + backupId + "/" + walFile;
-    }
-  }
-  
-  private static final Log LOG = LogFactory.getLog(BackupSystemTable.class);
-  private final static TableName tableName = TableName.BACKUP_TABLE_NAME;  
-  // Stores backup sessions (contexts)
-  final static byte[] SESSIONS_FAMILY = "session".getBytes();
-  // Stores other metadata
-  final static byte[] META_FAMILY = "meta".getBytes();
-  // Connection to HBase cluster, shared
-  // among all instances
-  private final Connection connection;
-    
-  public BackupSystemTable(Connection conn) throws IOException {
-    this.connection = conn;
-  }
-
- 
-  public void close() {
-     // do nothing 
-  }
-
-  /**
-   * Updates status (state) of a backup session in hbase:backup table
-   * @param context backup context
-   * @throws IOException exception
-   */
-  public void updateBackupInfo(BackupInfo context) throws IOException {
-
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("update backup status in hbase:backup for: " + context.getBackupId()
-        + " set status=" + context.getState());
-    }
-    try (Table table = connection.getTable(tableName)) {
-      Put put = BackupSystemTableHelper.createPutForBackupContext(context);
-      table.put(put);
-    }
-  }
-
-  /**
-   * Deletes backup status from hbase:backup table
-   * @param backupId backup id
-   * @throws IOException exception
-   */
-  public void deleteBackupInfo(String backupId) throws IOException {
-
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("delete backup status in hbase:backup for " + backupId);
-    }
-    try (Table table = connection.getTable(tableName)) {
-      Delete del = BackupSystemTableHelper.createDeleteForBackupInfo(backupId);
-      table.delete(del);
-    }
-  }
-
-  /**
-   * Reads backup status object (instance of BackupInfo) from hbase:backup table
-   * @param backupId - backupId
-   * @return current status of the backup session, or null if it does not exist
-   * @throws IOException exception
-   */
-  public BackupInfo readBackupInfo(String backupId) throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("read backup status from hbase:backup for: " + backupId);
-    }
-
-    try (Table table = connection.getTable(tableName)) {
-      Get get = BackupSystemTableHelper.createGetForBackupContext(backupId);
-      Result res = table.get(get);
-      if(res.isEmpty()){
-        return null;
-      }
-      return BackupSystemTableHelper.resultToBackupInfo(res);
-    }
-  }
-
-  /**
-   * Read the start code (timestamp) of the last successful backup. Returns null if there is no
-   * start code stored in HBase or the value is of length 0. Both cases indicate that no
-   * successful backup has completed so far.
-   * @param backupRoot root directory path of the backup
-   * @return the timestamp of the last successful backup, or null
-   * @throws IOException exception
-   */
-  public String readBackupStartCode(String backupRoot) throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("read backup start code from hbase:backup");
-    }
-    try (Table table = connection.getTable(tableName)) {
-      Get get = BackupSystemTableHelper.createGetForStartCode(backupRoot);
-      Result res = table.get(get);
-      if (res.isEmpty()) {
-        return null;
-      }
-      Cell cell = res.listCells().get(0);
-      byte[] val = CellUtil.cloneValue(cell);
-      if (val.length == 0){
-        return null;
-      }
-      return new String(val);
-    }
-  }
-
-  /**
-   * Write the start code (timestamp) to hbase:backup.
-   * @param startCode start code
-   * @param backupRoot root directory path of the backup
-   * @throws IOException exception
-   */
-  public void writeBackupStartCode(Long startCode, String backupRoot) throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("write backup start code to hbase:backup " + startCode);
-    }
-    try (Table table = connection.getTable(tableName)) {
-      Put put = BackupSystemTableHelper.createPutForStartCode(startCode.toString(), backupRoot);
-      table.put(put);
-    }
-  }
-
-  /**
-   * Get the region servers' log information after the last log roll from hbase:backup.
-   * @param backupRoot root directory path of the backup
-   * @return RS log info
-   * @throws IOException exception
-   */
-  public HashMap<String, Long> readRegionServerLastLogRollResult(String backupRoot)
-      throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("read region server last roll log result to hbase:backup");
-    }
-
-    Scan scan = BackupSystemTableHelper.createScanForReadRegionServerLastLogRollResult(backupRoot);
-    scan.setMaxVersions(1);
-
-    try (Table table = connection.getTable(tableName);
-        ResultScanner scanner = table.getScanner(scan)) {
-      Result res = null;
-      HashMap<String, Long> rsTimestampMap = new HashMap<String, Long>();
-      while ((res = scanner.next()) != null) {
-        res.advance();
-        Cell cell = res.current();
-        byte[] row = CellUtil.cloneRow(cell);
-        String server =
-            BackupSystemTableHelper.getServerNameForReadRegionServerLastLogRollResult(row);
-        byte[] data = CellUtil.cloneValue(cell);
-        rsTimestampMap.put(server, Long.parseLong(new String(data)));
-      }
-      return rsTimestampMap;
-    }
-  }
-
-  /**
-   * Writes Region Server last roll log result (timestamp) to hbase:backup table
-   * @param server - Region Server name
-   * @param ts - last log timestamp
-   * @param backupRoot - root directory path of the backup
-   * @throws IOException exception
-   */
-  public void writeRegionServerLastLogRollResult(String server, Long ts, String backupRoot)
-      throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("write region server last roll log result to hbase:backup");
-    }
-    try (Table table = connection.getTable(tableName)) {
-      Put put = BackupSystemTableHelper
-          .createPutForRegionServerLastLogRollResult(server, ts, backupRoot);
-      table.put(put);
-    }
-  }
-
-  /**
-   * Get all completed backup information (in descending order by time)
-   * @param onlyCompleted true, if only successfully completed sessions should be returned
-   * @return history of BackupInfo
-   * @throws IOException exception
-   */
-  public ArrayList<BackupInfo> getBackupHistory(boolean onlyCompleted) throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("get backup history from hbase:backup");
-    }
-    Scan scan = BackupSystemTableHelper.createScanForBackupHistory();
-    scan.setMaxVersions(1);
-
-    ArrayList<BackupInfo> list = new ArrayList<BackupInfo>();
-    try (Table table = connection.getTable(tableName);
-        ResultScanner scanner = table.getScanner(scan)) {
-
-      Result res = null;
-      while ((res = scanner.next()) != null) {
-        res.advance();
-        BackupInfo context = BackupSystemTableHelper.cellToBackupInfo(res.current());
-        if(onlyCompleted && context.getState() != BackupState.COMPLETE){
-          continue;
-        }
-        list.add(context);
-      }
-      return BackupUtil.sortHistoryListDesc(list);
-    }
-  }
-
-  public ArrayList<BackupInfo> getBackupHistory() throws IOException {
-    return getBackupHistory(false);
-  }
-  
-  /**
-   * Get all backup sessions with a given state (in descending order by time)
-   * @param status backup session state
-   * @return history of backup contexts
-   * @throws IOException exception
-   */
-  public ArrayList<BackupInfo> getBackupContexts(BackupState status) throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("get backup contexts from hbase:backup");
-    }
-
-    Scan scan = BackupSystemTableHelper.createScanForBackupHistory();
-    scan.setMaxVersions(1);
-    ArrayList<BackupInfo> list = new ArrayList<BackupInfo>();
-
-    try (Table table = connection.getTable(tableName);
-        ResultScanner scanner = table.getScanner(scan)) {
-      Result res = null;
-      while ((res = scanner.next()) != null) {
-        res.advance();
-        BackupInfo context = BackupSystemTableHelper.cellToBackupInfo(res.current());
-        if (context.getState() != status){
-          continue;
-        }
-        list.add(context);
-      }
-      return list;
-    }
-  }
-
-  /**
-   * Write the current timestamps for each regionserver to hbase:backup after a successful full or
-   * incremental backup. The saved timestamp is of the last log file that was backed up already.
-   * @param tables tables
-   * @param newTimestamps timestamps
-   * @param backupRoot root directory path of the backup
-   * @throws IOException exception
-   */
-  public void writeRegionServerLogTimestamp(Set<TableName> tables,
-      HashMap<String, Long> newTimestamps, String backupRoot) throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("write RS log ts to hbase:backup");
-    }
-    List<Put> puts = new ArrayList<Put>();
-    for (TableName table : tables) {
-      byte[] smapData = toTableServerTimestampProto(table, newTimestamps).toByteArray();
-      Put put = BackupSystemTableHelper
-          .createPutForWriteRegionServerLogTimestamp(table, smapData, backupRoot);
-      puts.add(put);
-    }
-    try (Table table = connection.getTable(tableName)) {
-      table.put(puts);
-    }
-  }
-
-  /**
-   * Read the timestamp for each region server log after the last successful backup. Each table
-   * has its own set of the timestamps. The info is stored for each table as a concatenated
-   * string of rs->timestamp
-   * @param backupRoot root directory path of the backup
-   * @return the timestamp for each region server. key: tableName value:
-   *         RegionServer,PreviousTimeStamp
-   * @throws IOException exception
-   */
-  public HashMap<TableName, HashMap<String, Long>> readLogTimestampMap(String backupRoot)
-      throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("read RS log ts from hbase:backup");
-    }
-
-    HashMap<TableName, HashMap<String, Long>> tableTimestampMap =
-        new HashMap<TableName, HashMap<String, Long>>();
-
-    Scan scan = BackupSystemTableHelper.createScanForReadLogTimestampMap(backupRoot);
-    try (Table table = connection.getTable(tableName);
-        ResultScanner scanner = table.getScanner(scan)) {
-      Result res = null;
-      while ((res = scanner.next()) != null) {
-        res.advance();
-        Cell cell = res.current();
-        byte[] row = CellUtil.cloneRow(cell);
-        String tabName = BackupSystemTableHelper.getTableNameForReadLogTimestampMap(row);
-        TableName tn = TableName.valueOf(tabName);
-        byte[] data = CellUtil.cloneValue(cell);
-        if (data == null || data.length == 0) {
-          throw new IOException("Last backup data from hbase:backup "
-              + "is empty. Create a backup first.");
-        }
-        HashMap<String, Long> lastBackup =
-            fromTableServerTimestampProto(BackupProtos.TableServerTimestamp.parseFrom(data));
-        tableTimestampMap.put(tn, lastBackup);
-      }
-      return tableTimestampMap;
-    }
-  }
-
-  private BackupProtos.TableServerTimestamp toTableServerTimestampProto(TableName table,
-      Map<String, Long> map) {
-    BackupProtos.TableServerTimestamp.Builder tstBuilder =
-        BackupProtos.TableServerTimestamp.newBuilder();
-    tstBuilder.setTable(ProtobufUtil.toProtoTableName(table));
-
-    for(Entry<String, Long> entry: map.entrySet()) {
-      BackupProtos.ServerTimestamp.Builder builder = BackupProtos.ServerTimestamp.newBuilder();
-      builder.setServer(entry.getKey());
-      builder.setTimestamp(entry.getValue());
-      tstBuilder.addServerTimestamp(builder.build());
-    }
-
-    return tstBuilder.build();
-  }
-
-  private HashMap<String, Long> fromTableServerTimestampProto(
-      BackupProtos.TableServerTimestamp proto) {
-    HashMap<String, Long> map = new HashMap<String, Long> ();
-    List<BackupProtos.ServerTimestamp> list = proto.getServerTimestampList();
-    for(BackupProtos.ServerTimestamp st: list) {
-      map.put(st.getServer(), st.getTimestamp());
-    }
-    return map;
-  }
-
-  /**
-   * Return the current tables covered by incremental backup.
-   * @param backupRoot root directory path of the backup
-   * @return set of tableNames
-   * @throws IOException exception
-   */
-  public Set<TableName> getIncrementalBackupTableSet(String backupRoot) throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("get incr backup table set from hbase:backup");
-    }
-    TreeSet<TableName> set = new TreeSet<>();
-
-    try (Table table = connection.getTable(tableName)) {
-      Get get = BackupSystemTableHelper.createGetForIncrBackupTableSet(backupRoot);
-      Result res = table.get(get);
-      if (res.isEmpty()) {
-        return set;
-      }
-      List<Cell> cells = res.listCells();
-      for (Cell cell : cells) {
-        // qualifier = table name - we use table names as qualifiers
-        set.add(TableName.valueOf(CellUtil.cloneQualifier(cell)));
-      }
-      return set;
-    }
-  }
-
-  /**
-   * Add tables to global incremental backup set
-   * @param tables - set of tables
-   * @param backupRoot root directory path of the backup
-   * @throws IOException exception
-   */
-  public void addIncrementalBackupTableSet(Set<TableName> tables, String backupRoot)
-      throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("add incr backup table set to hbase:backup");
-      for (TableName table : tables) {
-        LOG.debug(table);
-      }
-    }
-    try (Table table = connection.getTable(tableName)) {
-      Put put = BackupSystemTableHelper.createPutForIncrBackupTableSet(tables, backupRoot);
-      table.put(put);
-    }
-  }
-
-  /**
-   * Register WAL files as eligible for deletion
-   * @param files list of WAL file paths
-   * @param backupId backup id
-   * @param backupRoot root directory path of the backup
-   * @throws IOException exception
-   */
-  public void addWALFiles(List<String> files, String backupId, String backupRoot)
-      throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("add WAL files to hbase:backup: "+backupId +" "+backupRoot);
-      for(String f: files){
-        LOG.debug("add :"+f);
-      }
-    }
-    try (Table table = connection.getTable(tableName)) {
-      List<Put> puts = 
-          BackupSystemTableHelper.createPutsForAddWALFiles(files, backupId, backupRoot);
-      table.put(puts);
-    }
-  }
-
-  /**
-   * Creates an iterator over the WAL files recorded in hbase:backup for a backup destination
-   * @param backupRoot root directory path of the backup
-   * @return iterator over WALItem entries
-   * @throws IOException exception
-   */
-  public Iterator<WALItem> getWALFilesIterator(String backupRoot) throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("get WAL files from hbase:backup");
-    }
-    final Table table = connection.getTable(tableName);
-    Scan scan = BackupSystemTableHelper.createScanForGetWALs(backupRoot);
-    final ResultScanner scanner = table.getScanner(scan);
-    final Iterator<Result> it = scanner.iterator();
-    return new Iterator<WALItem>() {
-
-      @Override
-      public boolean hasNext() {
-        boolean next = it.hasNext();
-        if (!next) {
-          // close all
-          try {
-            scanner.close();
-            table.close();
-          } catch (Exception e) {
-            LOG.error(e);
-          }
-        }
-        return next;
-      }
-
-      @Override
-      public WALItem next() {
-        Result next = it.next();
-        List<Cell> cells = next.listCells();
-        byte[] buf = cells.get(0).getValueArray();
-        int len = cells.get(0).getValueLength();
-        int offset = cells.get(0).getValueOffset();
-        String backupId = new String(buf, offset, len);
-        buf = cells.get(1).getValueArray();
-        len = cells.get(1).getValueLength();
-        offset = cells.get(1).getValueOffset();
-        String walFile = new String(buf, offset, len);
-        buf = cells.get(2).getValueArray();
-        len = cells.get(2).getValueLength();
-        offset = cells.get(2).getValueOffset();
-        String backupRoot = new String(buf, offset, len);    
-        return new WALItem(backupId, walFile, backupRoot);
-      }
-
-      @Override
-      public void remove() {
-        // iterator is read-only
-        throw new UnsupportedOperationException("remove is not supported");
-      }
-    };
-
-  }
-
-  /**
-   * Check if WAL file is eligible for deletion.
-   * Future: to support all backup destinations
-   * @param file file
-   * @return true, if the file has already been backed up
-   * @throws IOException exception
-   */
-  public boolean checkWALFile(String file) throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Check if WAL file has already been backed up in hbase:backup: " + file);
-    }
-    try (Table table = connection.getTable(tableName)) {
-      Get get = BackupSystemTableHelper.createGetForCheckWALFile(file);
-      Result res = table.get(get);
-      return !res.isEmpty();
-    }
-  }
-
-  /**
-   * Checks if we have at least one backup session in hbase:backup. This API is used by
-   * BackupLogCleaner
-   * @return true, if at least one session exists in hbase:backup table
-   * @throws IOException exception
-   */
-  public boolean hasBackupSessions() throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("has backup sessions from hbase:backup");
-    }
-    Scan scan = BackupSystemTableHelper.createScanForBackupHistory();
-    scan.setMaxVersions(1);
-    scan.setCaching(1);
-    try (Table table = connection.getTable(tableName);
-        ResultScanner scanner = table.getScanner(scan)) {
-      return scanner.next() != null;
-    }
-  }
-  
-  /**
-   * BACKUP SETS
-   */
-  
-  /**
-   * Get backup set list
-   * @return backup set list
-   * @throws IOException exception
-   */
-  public List<String> listBackupSets() throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug(" backup set list");
-    }
-    List<String> list = new ArrayList<String>();
-    Scan scan = BackupSystemTableHelper.createScanForBackupSetList();
-    scan.setMaxVersions(1);
-    try (Table table = connection.getTable(tableName);
-        ResultScanner scanner = table.getScanner(scan)) {
-      Result res = null;
-      while ((res = scanner.next()) != null) {
-        res.advance();
-        list.add(BackupSystemTableHelper.cellKeyToBackupSetName(res.current()));
-      }
-      return list;
-    }
-  }
- 
-  /**
-   * Get backup set description (list of tables)
-   * @param name set's name
-   * @return list of tables in a backup set
-   * @throws IOException exception
-   */
-  public List<TableName> describeBackupSet(String name) throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug(" backup set describe: " + name);
-    }
-    try (Table table = connection.getTable(tableName)) {
-      Get get = BackupSystemTableHelper.createGetForBackupSet(name);
-      Result res = table.get(get);
-      if (res.isEmpty()) {
-        return new ArrayList<TableName>();
-      }
-      res.advance();
-      String[] tables = BackupSystemTableHelper.cellValueToBackupSet(res.current());
-      return toList(tables);
-    }
-  }
- 
-  private List<TableName> toList(String[] tables) {
-    List<TableName> list = new ArrayList<TableName>(tables.length);
-    for (String name : tables) {
-      list.add(TableName.valueOf(name));
-    }
-    return list;
-  }
- 
-  /**
-   * Add tables to a backup set
-   * @param name - set name
-   * @param newTables - list of tables to add
-   * @throws IOException exception
-   */
-  public void addToBackupSet(String name, String[] newTables) throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug(" backup set add: " + name);
-    }
-    String[] union = null;
-    try (Table table = connection.getTable(tableName)) {
-      Get get = BackupSystemTableHelper.createGetForBackupSet(name);
-      Result res = table.get(get);
-      if (res.isEmpty()) {
-        union = newTables;
-      } else {
-        res.advance();
-        String[] tables = BackupSystemTableHelper.cellValueToBackupSet(res.current());
-        union = merge(tables, newTables);
-      }
-      Put put = BackupSystemTableHelper.createPutForBackupSet(name, union);
-      table.put(put);
-    }
-  }
- 
-  private String[] merge(String[] tables, String[] newTables) {
-    List<String> list = new ArrayList<String>();
-    // Add all from tables
-    for (String t : tables) {
-      list.add(t);
-    }
-    for (String nt : newTables) {
-      if (list.contains(nt)) {
-        continue;
-      }
-      list.add(nt);
-    }
-    String[] arr = new String[list.size()];
-    list.toArray(arr);
-    return arr;
-  }
-
-  /**
-   * Remove tables from backup set (list of tables)
-   * @param name - set name
-   * @param toRemove - list of tables to remove
-   * @throws IOException exception
-   */
-  public void removeFromBackupSet(String name, String[] toRemove) throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug(" backup set remove from: " + name);
-    }
-    try (Table table = connection.getTable(tableName)) {
-      Get get = BackupSystemTableHelper.createGetForBackupSet(name);
-      Result res = table.get(get);
-      if (res.isEmpty()) {
-        return;
-      }
-      res.advance();
-      String[] tables = BackupSystemTableHelper.cellValueToBackupSet(res.current());
-      String[] disjoint = disjoin(tables, toRemove);
-      if (disjoint.length > 0) {
-        Put put = BackupSystemTableHelper.createPutForBackupSet(name, disjoint);
-        table.put(put);
-      } else {
-        // All tables were removed: delete the backup set itself
-        deleteBackupSet(name);
-      }
-    }
-  }
-
-  private String[] disjoin(String[] tables, String[] toRemove) {
-    List<String> list = new ArrayList<String>();
-    // Add all from tables
-    for (String t : tables) {
-      list.add(t);
-    }
-    for (String nt : toRemove) {
-      if (list.contains(nt)) {
-        list.remove(nt);
-      }
-    }
-    String[] arr = new String[list.size()];
-    list.toArray(arr);
-    return arr;
-  }
-
-  /**
-   * Delete backup set
-   * @param name set's name
-   * @throws IOException exception
-   */
-  public void deleteBackupSet(String name) throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug(" backup set delete: " + name);
-    }
-    try (Table table = connection.getTable(tableName)) {
-      Delete del = BackupSystemTableHelper.createDeleteForBackupSet(name);
-      table.delete(del);
-    }
-  }
-
-  /**
-   * Get backup system table descriptor
-   * @return descriptor
-   */
-  public static HTableDescriptor getSystemTableDescriptor() {
-    HTableDescriptor tableDesc = new HTableDescriptor(tableName);
-    HColumnDescriptor colSessionsDesc = new HColumnDescriptor(SESSIONS_FAMILY);
-    colSessionsDesc.setMaxVersions(1);
-    // Time to keep backup sessions (secs)
-    Configuration config = HBaseConfiguration.create();
-    int ttl =
-        config.getInt(HConstants.BACKUP_SYSTEM_TTL_KEY, HConstants.BACKUP_SYSTEM_TTL_DEFAULT);
-    colSessionsDesc.setTimeToLive(ttl);
-    tableDesc.addFamily(colSessionsDesc);
-    HColumnDescriptor colMetaDesc = new HColumnDescriptor(META_FAMILY);
-    tableDesc.addFamily(colMetaDesc);
-    return tableDesc;
-  }
-
-  public static String getTableNameAsString() {
-    return tableName.getNameAsString();
-  }
-  
-  public static TableName getTableName() {
-    return tableName;
-  }
-}

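For orientation, a minimal sketch of how the BackupSystemTable API deleted above was typically driven by caller code (hypothetical usage, not part of this patch; it assumes a running cluster and an hbase-site.xml on the classpath):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.backup.BackupInfo;
import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class BackupHistoryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // BackupSystemTable implements Closeable, so try-with-resources applies
    try (Connection conn = ConnectionFactory.createConnection(conf);
        BackupSystemTable backupTable = new BackupSystemTable(conn)) {
      // List successfully completed backup sessions, newest first
      for (BackupInfo info : backupTable.getBackupHistory(true)) {
        System.out.println(info.getBackupId() + " state=" + info.getState());
      }
    }
  }
}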
http://git-wip-us.apache.org/repos/asf/hbase/blob/82735499/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTableHelper.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTableHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTableHelper.java
deleted file mode 100644
index 7b82655..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTableHelper.java
+++ /dev/null
@@ -1,423 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.backup.impl;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-import java.util.Set;
-
-import org.apache.commons.lang.StringUtils;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.BackupInfo;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
-import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.util.Bytes;
-
-/**
- * A collection of methods used by BackupSystemTable.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public final class BackupSystemTableHelper {
-
-  /**
-   * hbase:backup schema:
-   * 1. Backup sessions rowkey= "session:" + backupId; value = serialized
-   * BackupContext
-   * 2. Backup start code rowkey = "startcode:" + backupRoot; value = startcode
-   * 3. Incremental backup set rowkey="incrbackupset:" + backupRoot; value=[list of tables]
-   * 4. Table-RS-timestamp map rowkey="trslm:"+ backupRoot+table_name; value = map[RS-> 
-   * last WAL timestamp]
-   * 5. RS - WAL ts map rowkey="rslogts:"+backupRoot +server; value = last WAL timestamp
-   * 6. WALs recorded rowkey="wals:"+WAL unique file name; value = backupId and full WAL file name
-   */
-
-  private final static String BACKUP_INFO_PREFIX = "session:";
-  private final static String START_CODE_ROW = "startcode:";
-  private final static String INCR_BACKUP_SET = "incrbackupset:";
-  private final static String TABLE_RS_LOG_MAP_PREFIX = "trslm:";
-  private final static String RS_LOG_TS_PREFIX = "rslogts:";
-  private final static String WALS_PREFIX = "wals:";
-  private final static String SET_KEY_PREFIX = "backupset:";
-
-  private final static byte[] EMPTY_VALUE = new byte[] {};
-  
-  // Safe delimiter in a string
-  private final static String NULL = "\u0000";
-
-  private BackupSystemTableHelper() {
-    throw new AssertionError("Instantiating utility class...");
-  }
-
-  /**
-   * Creates Put operation for a given backup context object
-   * @param context backup context
-   * @return put operation
-   * @throws IOException exception
-   */
-  static Put createPutForBackupContext(BackupInfo context) throws IOException {
-    Put put = new Put(rowkey(BACKUP_INFO_PREFIX, context.getBackupId()));
-    put.addColumn(BackupSystemTable.SESSIONS_FAMILY, "context".getBytes(), context.toByteArray());
-    return put;
-  }
-
-  /**
-   * Creates Get operation for a given backup id
-   * @param backupId - backup's ID
-   * @return get operation
-   * @throws IOException exception
-   */
-  static Get createGetForBackupContext(String backupId) throws IOException {
-    Get get = new Get(rowkey(BACKUP_INFO_PREFIX, backupId));
-    get.addFamily(BackupSystemTable.SESSIONS_FAMILY);
-    get.setMaxVersions(1);
-    return get;
-  }
-
-  /**
-   * Creates Delete operation for a given backup id
-   * @param backupId - backup's ID
-   * @return delete operation
-   */
-  public static Delete createDeleteForBackupInfo(String backupId) {
-    Delete del = new Delete(rowkey(BACKUP_INFO_PREFIX, backupId));
-    del.addFamily(BackupSystemTable.SESSIONS_FAMILY);
-    return del;
-  }
-
-  /**
-   * Converts Result to BackupContext
-   * @param res - HBase result
-   * @return backup context instance
-   * @throws IOException exception
-   */
-  static BackupInfo resultToBackupInfo(Result res) throws IOException {
-    res.advance();
-    Cell cell = res.current();
-    return cellToBackupInfo(cell);
-  }
-
-  /**
-   * Creates Get operation to retrieve start code from hbase:backup
-   * @return get operation
-   * @throws IOException exception
-   */
-  static Get createGetForStartCode(String rootPath) throws IOException {    
-    Get get = new Get(rowkey(START_CODE_ROW, rootPath));
-    get.addFamily(BackupSystemTable.META_FAMILY);
-    get.setMaxVersions(1);
-    return get;
-  }
-
-  /**
-   * Creates Put operation to store start code to hbase:backup
-   * @param startCode start code
-   * @param rootPath root directory path of the backup
-   * @return put operation
-   */
-  static Put createPutForStartCode(String startCode, String rootPath) {
-    Put put = new Put(rowkey(START_CODE_ROW, rootPath));
-    put.addColumn(BackupSystemTable.META_FAMILY, "startcode".getBytes(), startCode.getBytes());
-    return put;
-  }
-
-  /**
-   * Creates Get to retrieve incremental backup table set from hbase:backup
-   * @return get operation
-   * @throws IOException exception
-   */
-  static Get createGetForIncrBackupTableSet(String backupRoot) throws IOException {
-    Get get = new Get(rowkey(INCR_BACKUP_SET, backupRoot));
-    get.addFamily(BackupSystemTable.META_FAMILY);
-    get.setMaxVersions(1);
-    return get;
-  }
-
-  /**
-   * Creates Put to store incremental backup table set
-   * @param tables tables
-   * @return put operation
-   */
-  static Put createPutForIncrBackupTableSet(Set<TableName> tables, String backupRoot) {
-    Put put = new Put(rowkey(INCR_BACKUP_SET, backupRoot));
-    for (TableName table : tables) {
-      put.addColumn(BackupSystemTable.META_FAMILY, Bytes.toBytes(table.getNameAsString()),
-        EMPTY_VALUE);
-    }
-    return put;
-  }
-
-  /**
-   * Creates Scan operation to load backup history
-   * @return scan operation
-   */
-  static Scan createScanForBackupHistory() {
-    Scan scan = new Scan();
-    byte[] startRow = BACKUP_INFO_PREFIX.getBytes();
-    byte[] stopRow = Arrays.copyOf(startRow, startRow.length);
-    stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1);
-    scan.setStartRow(startRow);
-    scan.setStopRow(stopRow);
-    scan.addFamily(BackupSystemTable.SESSIONS_FAMILY);
-
-    return scan;
-  }
-
-  /**
-   * Converts cell to backup context instance.
-   * @param current - cell
-   * @return backup context instance
-   * @throws IOException exception
-   */
-  static BackupInfo cellToBackupInfo(Cell current) throws IOException {
-    byte[] data = CellUtil.cloneValue(current);
-    return BackupInfo.fromByteArray(data);
-  }
-
-  /**
-   * Creates Put to write RS last roll log timestamp map
-   * @param table - table
-   * @param smap - map, containing RS:ts
-   * @return put operation
-   */
-  static Put createPutForWriteRegionServerLogTimestamp(TableName table, byte[] smap, 
-      String backupRoot) {    
-    Put put = new Put(rowkey(TABLE_RS_LOG_MAP_PREFIX, backupRoot, NULL, table.getNameAsString()));
-    put.addColumn(BackupSystemTable.META_FAMILY, "log-roll-map".getBytes(), smap);
-    return put;
-  }
-
-  /**
-   * Creates Scan to load the table -> {RS -> ts} map of maps
-   * @return scan operation
-   */
-  static Scan createScanForReadLogTimestampMap(String backupRoot) {
-    Scan scan = new Scan();
-    byte[] startRow = rowkey(TABLE_RS_LOG_MAP_PREFIX, backupRoot);
-    byte[] stopRow = Arrays.copyOf(startRow, startRow.length);
-    stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1);
-    scan.setStartRow(startRow);
-    scan.setStopRow(stopRow);
-    scan.addFamily(BackupSystemTable.META_FAMILY);
-
-    return scan;
-  }
-
-  /**
-   * Get table name from rowkey
-   * @param cloneRow rowkey
-   * @return table name
-   */
-  static String getTableNameForReadLogTimestampMap(byte[] cloneRow) {
-    String s = new String(cloneRow);
-    int index = s.lastIndexOf(NULL); 
-    return s.substring(index +1);
-  }
-
-  /**
-   * Creates Put to store RS last log result
-   * @param server - server name
-   * @param timestamp - log roll result (timestamp)
-   * @return put operation
-   */
-  static Put createPutForRegionServerLastLogRollResult(String server, 
-      Long timestamp, String backupRoot ) {
-    Put put = new Put(rowkey(RS_LOG_TS_PREFIX, backupRoot, NULL, server));
-    put.addColumn(BackupSystemTable.META_FAMILY, "rs-log-ts".getBytes(), 
-      timestamp.toString().getBytes());
-    return put;
-  }
-
-  /**
-   * Creates Scan operation to load last RS log roll results
-   * @return scan operation
-   */
-  static Scan createScanForReadRegionServerLastLogRollResult(String backupRoot) {
-    Scan scan = new Scan();
-    byte[] startRow = rowkey(RS_LOG_TS_PREFIX, backupRoot);
-    byte[] stopRow = Arrays.copyOf(startRow, startRow.length);
-    stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1);
-    scan.setStartRow(startRow);
-    scan.setStopRow(stopRow);
-    scan.addFamily(BackupSystemTable.META_FAMILY);
-
-    return scan;
-  }
-
-  /**
-   * Get server's name from rowkey
-   * @param row - rowkey
-   * @return server's name
-   */
-  static String getServerNameForReadRegionServerLastLogRollResult(byte[] row) {
-    String s = new String(row);
-    int index = s.lastIndexOf(NULL);
-    return s.substring(index +1);
-  }
-
-  /**
-   * Creates put list for list of WAL files
-   * @param files list of WAL file paths
-   * @param backupId backup id
-   * @param backupRoot root directory path of the backup
-   * @return put list
-   * @throws IOException exception
-   */
-  public static List<Put> createPutsForAddWALFiles(List<String> files, String backupId,
-      String backupRoot) throws IOException {
-
-    List<Put> puts = new ArrayList<Put>();
-    for (String file : files) {
-      Put put = new Put(rowkey(WALS_PREFIX, BackupUtil.getUniqueWALFileNamePart(file)));
-      put.addColumn(BackupSystemTable.META_FAMILY, "backupId".getBytes(), backupId.getBytes());
-      put.addColumn(BackupSystemTable.META_FAMILY, "file".getBytes(), file.getBytes());
-      put.addColumn(BackupSystemTable.META_FAMILY, "root".getBytes(), backupRoot.getBytes());
-      puts.add(put);
-    }
-    return puts;
-  }
-
-  /**
-   * Creates Scan operation to load WALs
-   * TODO: support for backupRoot
-   * @param backupRoot - path to backup destination 
-   * @return scan operation
-   */
-  public static Scan createScanForGetWALs(String backupRoot) {
-    Scan scan = new Scan();
-    byte[] startRow = WALS_PREFIX.getBytes();
-    byte[] stopRow = Arrays.copyOf(startRow, startRow.length);
-    stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1);
-    scan.setStartRow(startRow);
-    scan.setStopRow(stopRow);
-    scan.addFamily(BackupSystemTable.META_FAMILY);
-    return scan;
-  }
-
-  /**
-   * Creates Get operation for a given wal file name
-   * TODO: support for backup destination
-   * @param file file
-   * @return get operation
-   * @throws IOException exception
-   */
-  public static Get createGetForCheckWALFile(String file) throws IOException {
-    Get get = new Get(rowkey(WALS_PREFIX, BackupUtil.getUniqueWALFileNamePart(file)));
-    // add backup root column
-    get.addFamily(BackupSystemTable.META_FAMILY);
-    return get;
-  }
-
-  /**
-   * Creates Scan operation to load backup set list
-   * @return scan operation
-   */
-  static Scan createScanForBackupSetList() {
-    Scan scan = new Scan();
-    byte[] startRow = SET_KEY_PREFIX.getBytes();
-    byte[] stopRow = Arrays.copyOf(startRow, startRow.length);
-    stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1);
-    scan.setStartRow(startRow);
-    scan.setStopRow(stopRow);
-    scan.addFamily(BackupSystemTable.META_FAMILY);
-    return scan;
-  }
-
-  /**
-   * Creates Get operation to load backup set content
-   * @return get operation
-   */
-  static Get createGetForBackupSet(String name) {
-    Get get = new Get(rowkey(SET_KEY_PREFIX, name));
-    get.addFamily(BackupSystemTable.META_FAMILY);
-    return get;
-  }
-
-  /**
-   * Creates Delete operation to delete backup set content
-   * @return delete operation
-   */
-  static Delete createDeleteForBackupSet(String name) {
-    Delete del = new Delete(rowkey(SET_KEY_PREFIX, name));
-    del.addFamily(BackupSystemTable.META_FAMILY);
-    return del;
-  }
-
-  /**
-   * Creates Put operation to update backup set content
-   * @return put operation
-   */
-  static Put createPutForBackupSet(String name, String[] tables) {
-    Put put = new Put(rowkey(SET_KEY_PREFIX, name));
-    byte[] value = convertToByteArray(tables);
-    put.addColumn(BackupSystemTable.META_FAMILY, "tables".getBytes(), value);
-    return put;
-  }
-
-  private static byte[] convertToByteArray(String[] tables) {
-    return StringUtils.join(tables, ",").getBytes();
-  }
-
-  /**
-   * Converts cell to backup set list.
-   * @param current - cell
-   * @return backup set
-   * @throws IOException exception
-   */
-  static String[] cellValueToBackupSet(Cell current) throws IOException {
-    byte[] data = CellUtil.cloneValue(current);
-    if (data != null && data.length > 0) {
-      return new String(data).split(",");
-    } else {
-      return new String[0];
-    }
-  }
-
-  /**
-   * Converts cell key to backup set name.
-   * @param current - cell
-   * @return backup set name
-   * @throws IOException exception
-   */
-  static String cellKeyToBackupSetName(Cell current) throws IOException {
-    byte[] data = CellUtil.cloneRow(current);
-    return new String(data).substring(SET_KEY_PREFIX.length());
-  }
-
-  static byte[] rowkey(String s, String... other) {
-    StringBuilder sb = new StringBuilder(s);
-    for (String ss : other) {
-      sb.append(ss);
-    }
-    return sb.toString().getBytes();
-  }
-}

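The helper above builds every hbase:backup rowkey by plain string concatenation, using the "\u0000" character as a delimiter that cannot occur in server or table names. A minimal standalone sketch of that scheme (a hypothetical re-implementation for illustration only, mirroring rowkey() and getServerNameForReadRegionServerLastLogRollResult()):

public class RowkeySchemeExample {
  private static final String NULL = "\u0000";

  // Mirrors BackupSystemTableHelper.rowkey(): prefix plus parts, concatenated
  static byte[] rowkey(String prefix, String... parts) {
    StringBuilder sb = new StringBuilder(prefix);
    for (String p : parts) {
      sb.append(p);
    }
    return sb.toString().getBytes();
  }

  // Mirrors the parse side: take the substring after the last NULL delimiter
  static String serverFromRowkey(byte[] row) {
    String s = new String(row);
    return s.substring(s.lastIndexOf(NULL) + 1);
  }

  public static void main(String[] args) {
    byte[] row = rowkey("rslogts:", "/backup/root", NULL, "rs1.example.com:16020");
    System.out.println(serverFromRowkey(row)); // prints rs1.example.com:16020
  }
}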
http://git-wip-us.apache.org/repos/asf/hbase/blob/82735499/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupUtil.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupUtil.java
deleted file mode 100644
index df2b0a6..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupUtil.java
+++ /dev/null
@@ -1,472 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.backup.impl;
-
-import java.io.IOException;
-import java.io.InterruptedIOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map.Entry;
-import java.util.TreeMap;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.PathFilter;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableDescriptor;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.BackupClientUtil;
-import org.apache.hadoop.hbase.backup.BackupInfo;
-import org.apache.hadoop.hbase.backup.HBackupFileSystem;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
-import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
-import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.util.FSTableDescriptors;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.wal.DefaultWALProvider;
-
-/**
- * A collection of methods used by multiple classes to back up HBase tables.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public final class BackupUtil {
-  protected static final Log LOG = LogFactory.getLog(BackupUtil.class);
-  public static final String LOGNAME_SEPARATOR = ".";
-
-  private BackupUtil(){
-    throw new AssertionError("Instantiating utility class...");
-  }
-
-  public static void waitForSnapshot(SnapshotDescription snapshot, long max,
-      SnapshotManager snapshotMgr, Configuration conf) throws IOException {
-    boolean done = false;
-    long start = EnvironmentEdgeManager.currentTime();
-    int numRetries = conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
-      HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
-    long maxPauseTime = max / numRetries;
-    int tries = 0;
-    LOG.debug("Waiting a max of " + max + " ms for snapshot '" +
-        ClientSnapshotDescriptionUtils.toString(snapshot) + "' to complete. (max " +
-        maxPauseTime + " ms per retry)");
-    while (tries == 0
-        || ((EnvironmentEdgeManager.currentTime() - start) < max && !done)) {
-      try {
-        // sleep a backoff <= pauseTime amount
-        long pause = conf.getLong(HConstants.HBASE_CLIENT_PAUSE,
-          HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
-        long sleep = HBaseAdmin.getPauseTime(tries++, pause);
-        sleep = sleep > maxPauseTime ? maxPauseTime : sleep;
-        LOG.debug("(#" + tries + ") Sleeping: " + sleep +
-          "ms while waiting for snapshot completion.");
-        Thread.sleep(sleep);
-      } catch (InterruptedException e) {
-        throw (InterruptedIOException)new InterruptedIOException("Interrupted").initCause(e);
-      }
-      LOG.debug("Getting current status of snapshot ...");
-      done = snapshotMgr.isSnapshotDone(snapshot);
-    }
-    if (!done) {
-      throw new SnapshotCreationException("Snapshot '" + snapshot.getName()
-          + "' wasn't completed in expectedTime:" + max + " ms", snapshot);
-    }
-  }
-
-  /**
-   * Loop through the RS log timestamp map for the tables; for each RS, find the minimum
-   * timestamp value for the RS among the tables.
-   * @param rsLogTimestampMap timestamp map
-   * @return a map of the minimum timestamp for each RS
-   */
-  public static HashMap<String, Long> getRSLogTimestampMins(
-    HashMap<TableName, HashMap<String, Long>> rsLogTimestampMap) {
-
-    if (rsLogTimestampMap == null || rsLogTimestampMap.isEmpty()) {
-      return null;
-    }
-
-    HashMap<String, Long> rsLogTimestampMins = new HashMap<String, Long>();
-    HashMap<String, HashMap<TableName, Long>> rsLogTimestampMapByRS =
-        new HashMap<String, HashMap<TableName, Long>>();
-
-    for (Entry<TableName, HashMap<String, Long>> tableEntry : rsLogTimestampMap.entrySet()) {
-      TableName table = tableEntry.getKey();
-      HashMap<String, Long> rsLogTimestamp = tableEntry.getValue();
-      for (Entry<String, Long> rsEntry : rsLogTimestamp.entrySet()) {
-        String rs = rsEntry.getKey();
-        Long ts = rsEntry.getValue();
-        if (!rsLogTimestampMapByRS.containsKey(rs)) {
-          rsLogTimestampMapByRS.put(rs, new HashMap<TableName, Long>());
-        }
-        rsLogTimestampMapByRS.get(rs).put(table, ts);
-      }
-    }
-
-    for (String rs : rsLogTimestampMapByRS.keySet()) {
-      rsLogTimestampMins.put(rs, BackupClientUtil.getMinValue(rsLogTimestampMapByRS.get(rs)));
-    }
-
-    return rsLogTimestampMins;
-  }
-
-  /**
-   * Copy out a table's RegionInfo into the incremental backup image.
-   * TODO: consider moving this logic into HBackupFileSystem.
-   * @param backupContext backup context
-   * @param conf configuration
-   * @throws IOException exception
-   * @throws InterruptedException exception
-   */
-  public static void copyTableRegionInfo(BackupInfo backupContext, Configuration conf)
-      throws IOException, InterruptedException {
-
-    Path rootDir = FSUtils.getRootDir(conf);
-    FileSystem fs = rootDir.getFileSystem(conf);
-
-    // for each table in the table set, copy out the table info and region info files in the correct
-    // directory structure
-    for (TableName table : backupContext.getTables()) {
-
-      LOG.debug("Attempting to copy table info for:" + table);
-      TableDescriptor orig = FSTableDescriptors.getTableDescriptorFromFs(fs, rootDir, table);
-
-      // write a copy of descriptor to the target directory
-      Path target = new Path(backupContext.getBackupStatus(table).getTargetDir());
-      FileSystem targetFs = target.getFileSystem(conf);
-      FSTableDescriptors descriptors =
-          new FSTableDescriptors(conf, targetFs, FSUtils.getRootDir(conf));
-      descriptors.createTableDescriptorForTableDirectory(target, orig, false);
-      LOG.debug("Finished copying tableinfo.");
-
-      // TODO: optimize
-      List<HRegionInfo> regions = null;
-      try(Connection conn = ConnectionFactory.createConnection(conf);
-          Admin admin = conn.getAdmin()) {
-        regions = admin.getTableRegions(table);
-      } catch (Exception e) {
-        throw new BackupException(e);
-      }
-
-      // For each region, write the region info to disk
-      LOG.debug("Starting to write region info for table " + table);
-      for (HRegionInfo regionInfo : regions) {
-        Path regionDir =
-            HRegion.getRegionDir(new Path(backupContext.getBackupStatus(table).getTargetDir()),
-              regionInfo);
-        regionDir =
-            new Path(backupContext.getBackupStatus(table).getTargetDir(), regionDir.getName());
-        writeRegioninfoOnFilesystem(conf, targetFs, regionDir, regionInfo);
-      }
-      LOG.debug("Finished writing region info for table " + table);
-    }
-  }
-
-  /**
-   * Write the .regioninfo file on-disk.
-   */
-  public static void writeRegioninfoOnFilesystem(final Configuration conf, final FileSystem fs,
-      final Path regionInfoDir, HRegionInfo regionInfo) throws IOException {
-    final byte[] content = regionInfo.toDelimitedByteArray();
-    Path regionInfoFile = new Path(regionInfoDir, ".regioninfo");
-    // First check to get the permissions
-    FsPermission perms = FSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY);
-    // Write the RegionInfo file content
-    try (FSDataOutputStream out = FSUtils.create(conf, fs, regionInfoFile, perms, null)) {
-      out.write(content);
-    }
-  }
-
-  /**
-   * Parses host name and port from a WAL file path
-   * @param p path to the WAL file
-   * @return hostname:port
-   * @throws IOException exception
-   */
-  public static String parseHostNameFromLogFile(Path p) throws IOException {
-    if (isArchivedLogFile(p)) {
-      return BackupClientUtil.parseHostFromOldLog(p);
-    } else {
-      ServerName sname = DefaultWALProvider.getServerNameFromWALDirectoryName(p);
-      return sname.getHostname() + ":" + sname.getPort();
-    }
-  }
-
-  private static boolean isArchivedLogFile(Path p) {
-    String oldLog = Path.SEPARATOR + HConstants.HREGION_OLDLOGDIR_NAME + Path.SEPARATOR;
-    return p.toString().contains(oldLog);
-  }
-
-  /**
-   * Returns WAL file name
-   * @param walFileName WAL file name
-   * @return WAL file name
-   * @throws IOException exception
-   * @throws IllegalArgumentException exception
-   */
-  public static String getUniqueWALFileNamePart(String walFileName) throws IOException {
-    return getUniqueWALFileNamePart(new Path(walFileName));
-  }
-
-  /**
-   * Returns WAL file name
-   * @param p - WAL file path
-   * @return WAL file name
-   * @throws IOException exception
-   */
-  public static String getUniqueWALFileNamePart(Path p) throws IOException {
-    return p.getName();
-  }
-
-  /**
-   * Get the total length of files under the given directory recursively.
-   * @param fs The hadoop file system
-   * @param dir The target directory
-   * @return the total length of files
-   * @throws IOException exception
-   */
-  public static long getFilesLength(FileSystem fs, Path dir) throws IOException {
-    long totalLength = 0;
-    FileStatus[] files = FSUtils.listStatus(fs, dir);
-    if (files != null) {
-      for (FileStatus fileStatus : files) {
-        if (fileStatus.isDirectory()) {
-          totalLength += getFilesLength(fs, fileStatus.getPath());
-        } else {
-          totalLength += fileStatus.getLen();
-        }
-      }
-    }
-    return totalLength;
-  }
-
-
-  /**
-   * Sort history list by start time in descending order.
-   * @param historyList history list
-   * @return sorted list of BackupInfo
-   */
-  public static ArrayList<BackupInfo> sortHistoryListDesc(
-      ArrayList<BackupInfo> historyList) {
-    ArrayList<BackupInfo> list = new ArrayList<BackupInfo>();
-    // Keys must be numeric: string keys would order timestamps lexicographically
-    TreeMap<Long, BackupInfo> map = new TreeMap<Long, BackupInfo>();
-    for (BackupInfo h : historyList) {
-      map.put(h.getStartTs(), h);
-    }
-    Iterator<Long> i = map.descendingKeySet().iterator();
-    while (i.hasNext()) {
-      list.add(map.get(i.next()));
-    }
-    return list;
-  }
-
-  /**
-   * Get list of all WAL files (WALs and archive)
-   * @param c - configuration
-   * @return list of WAL files
-   * @throws IOException exception
-   */
-  public static List<String> getListOfWALFiles(Configuration c) throws IOException {
-    Path rootDir = FSUtils.getRootDir(c);
-    Path logDir = new Path(rootDir, HConstants.HREGION_LOGDIR_NAME);
-    Path oldLogDir = new Path(rootDir, HConstants.HREGION_OLDLOGDIR_NAME);
-    List<String> logFiles = new ArrayList<String>();
-
-    FileSystem fs = FileSystem.get(c);
-    logFiles = BackupClientUtil.getFiles(fs, logDir, logFiles, null);
-    logFiles = BackupClientUtil.getFiles(fs, oldLogDir, logFiles, null);
-    return logFiles;
-  }
-
-  /**
-   * Get list of all WAL files (WALs and archive)
-   * @param c - configuration
-   * @return list of WAL files
-   * @throws IOException exception
-   */
-  public static List<String> getListOfWALFiles(Configuration c, PathFilter filter)
-      throws IOException {
-    Path rootDir = FSUtils.getRootDir(c);
-    Path logDir = new Path(rootDir, HConstants.HREGION_LOGDIR_NAME);
-    Path oldLogDir = new Path(rootDir, HConstants.HREGION_OLDLOGDIR_NAME);
-    List<String> logFiles = new ArrayList<String>();
-
-    FileSystem fs = FileSystem.get(c);
-    logFiles = BackupClientUtil.getFiles(fs, logDir, logFiles, filter);
-    logFiles = BackupClientUtil.getFiles(fs, oldLogDir, logFiles, filter);
-    return logFiles;
-  }
-
-  /**
-   * Get list of all WAL files (WALs and archive) older than the given per-host timestamps
-   * @param c - configuration
-   * @param hostTimestampMap - map of host name to last backed-up WAL timestamp
-   * @return list of WAL files
-   * @throws IOException exception
-   */
-  public static List<String> getWALFilesOlderThan(final Configuration c,
-    final HashMap<String, Long> hostTimestampMap) throws IOException {
-    Path rootDir = FSUtils.getRootDir(c);
-    Path logDir = new Path(rootDir, HConstants.HREGION_LOGDIR_NAME);
-    Path oldLogDir = new Path(rootDir, HConstants.HREGION_OLDLOGDIR_NAME);
-    List<String> logFiles = new ArrayList<String>();
-
-    PathFilter filter = new PathFilter() {
-
-      @Override
-      public boolean accept(Path p) {
-        try {
-          if (DefaultWALProvider.isMetaFile(p)) {
-            return false;
-          }
-          String host = parseHostNameFromLogFile(p);
-          Long oldTimestamp = hostTimestampMap.get(host);
-          Long currentLogTS = BackupClientUtil.getCreationTime(p);
-          // Guard against hosts absent from the map to avoid an NPE on unboxing.
-          return oldTimestamp != null && currentLogTS <= oldTimestamp;
-        } catch (IOException e) {
-          LOG.error(e);
-          return false;
-        }
-      }
-    };
-    FileSystem fs = FileSystem.get(c);
-    logFiles = BackupClientUtil.getFiles(fs, logDir, logFiles, filter);
-    logFiles = BackupClientUtil.getFiles(fs, oldLogDir, logFiles, filter);
-    return logFiles;
-  }
-
-  public static String join(TableName[] names) {
-    StringBuilder sb = new StringBuilder();
-    String sep = BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND;
-    for (TableName s : names) {
-      // Append the delimiter only between names; a leading delimiter would make
-      // parseTableNames() see an empty first table name.
-      if (sb.length() > 0) {
-        sb.append(sep);
-      }
-      sb.append(s.getNameAsString());
-    }
-    return sb.toString();
-  }
-
-  public static TableName[] parseTableNames(String tables) {
-    if (tables == null) {
-      return null;
-    }
-    String[] tableArray = tables.split(BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND);
-
-    TableName[] ret = new TableName[tableArray.length];
-    for (int i = 0; i < tableArray.length; i++) {
-      ret[i] = TableName.valueOf(tableArray[i]);
-    }
-    return ret;
-  }
-
-  public static void cleanupBackupData(BackupInfo context, Configuration conf)
-      throws IOException {
-    cleanupHLogDir(context, conf);
-    cleanupTargetDir(context, conf);
-  }
-
-  /**
-   * Clean up directories generated when DistCp copies HLogs.
-   * @throws IOException if the log directory cannot be listed or deleted
-   */
-  private static void cleanupHLogDir(BackupInfo backupContext, Configuration conf)
-      throws IOException {
-
-    String logDir = backupContext.getHLogTargetDir();
-    if (logDir == null) {
-      LOG.warn("No log directory specified for " + backupContext.getBackupId());
-      return;
-    }
-
-    Path rootPath = new Path(logDir).getParent();
-    FileSystem fs = FileSystem.get(rootPath.toUri(), conf);
-    FileStatus[] files = FSUtils.listStatus(fs, rootPath);
-    if (files == null) {
-      return;
-    }
-    for (FileStatus file : files) {
-      LOG.debug("Delete log files: " + file.getPath().getName());
-      FSUtils.delete(fs, file.getPath(), true);
-    }
-  }
-
-  /**
-   * Clean up the data at target directory
-   */
-  private static void cleanupTargetDir(BackupInfo backupContext, Configuration conf) {
-    try {
-      // clean up the data at target directory
-      LOG.debug("Trying to cleanup up target dir : " + backupContext.getBackupId());
-      String targetDir = backupContext.getTargetRootDir();
-      if (targetDir == null) {
-        LOG.warn("No target directory specified for " + backupContext.getBackupId());
-        return;
-      }
-
-      FileSystem outputFs =
-          FileSystem.get(new Path(backupContext.getTargetRootDir()).toUri(), conf);
-
-      for (TableName table : backupContext.getTables()) {
-        Path targetDirPath =
-            new Path(HBackupFileSystem.getTableBackupDir(backupContext.getTargetRootDir(),
-              backupContext.getBackupId(), table));
-        if (outputFs.delete(targetDirPath, true)) {
-          LOG.info("Cleaning up backup data at " + targetDirPath.toString() + " done.");
-        } else {
-          LOG.info("No data has been found in " + targetDirPath.toString() + ".");
-        }
-
-        Path tableDir = targetDirPath.getParent();
-        FileStatus[] backups = FSUtils.listStatus(outputFs, tableDir);
-        if (backups == null || backups.length == 0) {
-          outputFs.delete(tableDir, true);
-          LOG.debug(tableDir.toString() + " is empty, removed it.");
-        }
-      }
-
-    } catch (IOException e1) {
-      LOG.error("Cleaning up backup data of " + backupContext.getBackupId() + " at "
-          + backupContext.getTargetRootDir() + " failed due to " + e1.getMessage() + ".");
-    }
-  }
-
-}
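
For readers following the per-host timestamp filter removed above (getWALFilesOlderThan), here is a minimal standalone sketch of the same decision logic. The class name, host names, and timestamps are illustrative only and not taken from the patch.

import java.util.HashMap;
import java.util.Map;

public class WalAgeFilterSketch {

  // A WAL counts as already covered by a previous backup when its creation
  // timestamp is at or before the last log-roll timestamp recorded for its host.
  static boolean coveredByPreviousBackup(String host, long walCreationTs,
      Map<String, Long> lastRollTsByHost) {
    Long lastRollTs = lastRollTsByHost.get(host);
    // A host with no recorded timestamp has never been backed up: keep its WALs.
    return lastRollTs != null && walCreationTs <= lastRollTs;
  }

  public static void main(String[] args) {
    Map<String, Long> lastRoll = new HashMap<String, Long>();
    lastRoll.put("rs1.example.com", 1500000000000L);
    System.out.println(coveredByPreviousBackup("rs1.example.com", 1499999999999L, lastRoll)); // true
    System.out.println(coveredByPreviousBackup("rs1.example.com", 1500000000001L, lastRoll)); // false
    System.out.println(coveredByPreviousBackup("rs2.example.com", 1499999999999L, lastRoll)); // false
  }
}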

http://git-wip-us.apache.org/repos/asf/hbase/blob/82735499/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
index 8338fee..8e192f1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
@@ -33,9 +33,10 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.BackupClientUtil;
 import org.apache.hadoop.hbase.backup.BackupInfo;
 import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager;
+import org.apache.hadoop.hbase.backup.util.BackupClientUtil;
+import org.apache.hadoop.hbase.backup.util.BackupServerUtil;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.client.Admin;
@@ -86,7 +87,7 @@ public class IncrementalBackupManager {
     HashMap<TableName, HashMap<String, Long>> previousTimestampMap =
         backupManager.readLogTimestampMap();
 
-    previousTimestampMins = BackupUtil.getRSLogTimestampMins(previousTimestampMap);    
+    previousTimestampMins = BackupServerUtil.getRSLogTimestampMins(previousTimestampMap);    
 
     if (LOG.isDebugEnabled()) {
       LOG.debug("StartCode " + savedStartCode + "for backupID " + backupContext.getBackupId());
@@ -170,7 +171,7 @@ public class IncrementalBackupManager {
         continue;
       }
       String walFileName = item.getWalFile();      
-      String server = BackupUtil.parseHostNameFromLogFile(new Path(walFileName));
+      String server = BackupServerUtil.parseHostNameFromLogFile(new Path(walFileName));
       Long tss = getTimestamp(walFileName);
       Long oldTss = olderTimestamps.get(server);
       Long newTss = newestTimestamps.get(server);
@@ -189,7 +190,7 @@ public class IncrementalBackupManager {
   }
 
   private Long getTimestamp(String walFileName) {
-    int index = walFileName.lastIndexOf(BackupUtil.LOGNAME_SEPARATOR);
+    int index = walFileName.lastIndexOf(BackupServerUtil.LOGNAME_SEPARATOR);
     return Long.parseLong(walFileName.substring(index+1));
   }
 
@@ -237,7 +238,7 @@ public class IncrementalBackupManager {
     rss = fs.listStatus(logDir);
     for (FileStatus rs : rss) {
       p = rs.getPath();
-      host = BackupUtil.parseHostNameFromLogFile(p);
+      host = BackupServerUtil.parseHostNameFromLogFile(p);
       FileStatus[] logs;
       oldTimeStamp = olderTimestamps.get(host);
       // It is possible that there is no old timestamp in hbase:backup for this host if

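The getTimestamp() change in this hunk keeps the same parse and only relocates the separator constant to BackupServerUtil. As a standalone illustration of that parse, a sketch follows; the separator value and the WAL file name are assumptions, not taken from the patch.

public class WalTimestampSketch {
  // Assumed separator between the WAL name body and its creation timestamp.
  static final String LOGNAME_SEPARATOR = ".";

  static long timestampOf(String walFileName) {
    int index = walFileName.lastIndexOf(LOGNAME_SEPARATOR);
    if (index < 0 || index == walFileName.length() - 1) {
      throw new IllegalArgumentException("no timestamp suffix in " + walFileName);
    }
    return Long.parseLong(walFileName.substring(index + 1));
  }

  public static void main(String[] args) {
    // Hypothetical WAL file name of the form host%2Cport%2Cstartcode.timestamp
    System.out.println(timestampOf("rs1.example.com%2C16020%2C1499999999999.1500000000123"));
    // prints 1500000000123
  }
}
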
http://git-wip-us.apache.org/repos/asf/hbase/blob/82735499/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalRestoreService.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalRestoreService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalRestoreService.java
deleted file mode 100644
index 8904184..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalRestoreService.java
+++ /dev/null
@@ -1,42 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.backup.impl;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configurable;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
-
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public interface IncrementalRestoreService extends Configurable {
-
-  /**
-   * Run restore operation
-   * @param logDirectoryPaths - path array of WAL log directories
-   * @param fromTables - source tables to restore from
-   * @param toTables - target tables to restore to
-   * @throws IOException if the restore fails
-   */
-  public void run(Path[] logDirectoryPaths, TableName[] fromTables, TableName[] toTables)
-    throws IOException;
-}
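
The removed interface defined a single entry point for WAL replay during incremental restore. For readers tracking the API change, a hypothetical minimal implementation of the old contract follows; the class and its behavior are illustrative only, not part of the patch.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.TableName;

public class NoOpIncrementalRestoreService implements IncrementalRestoreService {
  private Configuration conf;

  @Override
  public void run(Path[] logDirectoryPaths, TableName[] fromTables, TableName[] toTables)
      throws IOException {
    // A real implementation would replay the WALs under logDirectoryPaths,
    // mapping fromTables[i] onto toTables[i]; this stub only checks the mapping.
    if (fromTables.length != toTables.length) {
      throw new IOException("fromTables and toTables must have the same length");
    }
  }

  @Override
  public void setConf(Configuration conf) {
    this.conf = conf;
  }

  @Override
  public Configuration getConf() {
    return conf;
  }
}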

http://git-wip-us.apache.org/repos/asf/hbase/blob/82735499/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreClientImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreClientImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreClientImpl.java
index f16d213..9906f47 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreClientImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreClientImpl.java
@@ -34,10 +34,11 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.BackupType;
-import org.apache.hadoop.hbase.backup.BackupClientUtil;
 import org.apache.hadoop.hbase.backup.HBackupFileSystem;
 import org.apache.hadoop.hbase.backup.RestoreClient;
 import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage;
+import org.apache.hadoop.hbase.backup.util.BackupClientUtil;
+import org.apache.hadoop.hbase.backup.util.RestoreServerUtil;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.client.Admin;
@@ -62,6 +63,8 @@ public final class RestoreClientImpl implements RestoreClient {
     this.conf = conf;
   }
 
+  
+  
   /**
    * Restore operation. Stage 1: validate backupManifest, and check target tables
    * @param backupRootDir The root dir for backup image
@@ -72,11 +75,10 @@ public final class RestoreClientImpl implements RestoreClient {
    * @param tTableArray The array of mapping tables to restore to
    * @param isOverwrite True then do restore overwrite if target table exists, otherwise fail the
    *          request if target table exists
-   * @return True if only do dependency check
    * @throws IOException if any failure during restore
    */
   @Override
-  public boolean restore(String backupRootDir,
+  public void restore(String backupRootDir,
       String backupId, boolean check, boolean autoRestore, TableName[] sTableArray,
       TableName[] tTableArray, boolean isOverwrite) throws IOException {
 
@@ -98,11 +100,6 @@ public final class RestoreClientImpl implements RestoreClient {
         }
       }
 
-      // return true if only for check
-      if (check) {
-        return true;
-      }
-
       if (tTableArray == null) {
         tTableArray = sTableArray;
       }
@@ -121,8 +118,6 @@ public final class RestoreClientImpl implements RestoreClient {
       throw e;
     }
 
-    // not only for check, return false
-    return false;
   }
 
 
@@ -278,7 +273,7 @@ public final class RestoreClientImpl implements RestoreClient {
     String backupId = image.getBackupId();
 
     Path rootPath = new Path(rootDir);
-    RestoreUtil restoreTool = new RestoreUtil(conf, rootPath, backupId);
+    RestoreServerUtil restoreTool = new RestoreServerUtil(conf, rootPath, backupId);
     BackupManifest manifest = HBackupFileSystem.getManifest(sTable, conf, rootPath, backupId);
 
     Path tableBackupPath = HBackupFileSystem.getTableBackupPath(sTable, rootPath,  backupId);
@@ -320,7 +315,7 @@ public final class RestoreClientImpl implements RestoreClient {
     Path backupRoot = new Path(rootDir);
     
     // We need hFS only for full restore (see the code)
-    RestoreUtil restoreTool = new RestoreUtil(conf, backupRoot, backupId);
+    RestoreServerUtil restoreTool = new RestoreServerUtil(conf, backupRoot, backupId);
     BackupManifest manifest = HBackupFileSystem.getManifest(sTable, conf, backupRoot, backupId);
 
     Path tableBackupPath = HBackupFileSystem.getTableBackupPath(sTable, backupRoot, backupId);

