hbase-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From jmhs...@apache.org
Subject svn commit: r1445814 [2/3] - in /hbase/branches/hbase-7290/hbase-server/src: main/java/org/apache/hadoop/hbase/ main/java/org/apache/hadoop/hbase/client/ main/java/org/apache/hadoop/hbase/master/ main/java/org/apache/hadoop/hbase/master/snapshot/ main/...
Date Wed, 13 Feb 2013 18:37:34 GMT
Added: hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshotException.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshotException.java?rev=1445814&view=auto
==============================================================================
--- hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshotException.java (added)
+++ hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshotException.java Wed Feb 13 18:37:32 2013
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.snapshot;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+/**
+ * Thrown when an export-snapshot operation fails partway through.
+ */
+@InterfaceAudience.Public
+@SuppressWarnings("serial")
+public class ExportSnapshotException extends HBaseSnapshotException {
+
+  /**
+   * Create an exception with only a description of the failure.
+   * @param msg message describing the exception
+   */
+  public ExportSnapshotException(String msg) {
+    super(msg);
+  }
+
+  /**
+   * Create an exception that wraps the underlying cause of the failure.
+   * @param msg message describing the exception
+   * @param cause root cause of the export failure
+   */
+  public ExportSnapshotException(String msg, Exception cause) {
+    super(msg, cause);
+  }
+}

Added: hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/HBaseSnapshotException.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/HBaseSnapshotException.java?rev=1445814&view=auto
==============================================================================
--- hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/HBaseSnapshotException.java (added)
+++ hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/HBaseSnapshotException.java Wed Feb 13 18:37:32 2013
@@ -0,0 +1,77 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.snapshot;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hbase.HBaseIOException;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+
+/**
+ * General exception base class for when a snapshot fails.
+ * <p>
+ * May carry the {@link SnapshotDescription} of the snapshot that failed when it is known at the
+ * point the error is raised; otherwise {@link #getSnapshotDescription()} returns {@code null}.
+ */
+@SuppressWarnings("serial")
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class HBaseSnapshotException extends HBaseIOException {
+
+  // Description of the snapshot that failed; null when the snapshot is unknown.
+  private SnapshotDescription description;
+
+  /**
+   * Some exception happened for a snapshot and we don't even know the snapshot that it was about.
+   * @param msg Full description of the failure
+   */
+  public HBaseSnapshotException(String msg) {
+    super(msg);
+  }
+
+  /**
+   * Exception for the given snapshot that has no previous root cause.
+   * @param msg reason why the snapshot failed
+   * @param desc description of the snapshot that is being failed
+   */
+  public HBaseSnapshotException(String msg, SnapshotDescription desc) {
+    super(msg);
+    this.description = desc;
+  }
+
+  /**
+   * Exception for the given snapshot due to another exception.
+   * @param msg reason why the snapshot failed
+   * @param cause root cause of the failure
+   * @param desc description of the snapshot that is being failed
+   */
+  public HBaseSnapshotException(String msg, Throwable cause, SnapshotDescription desc) {
+    super(msg, cause);
+    this.description = desc;
+  }
+
+  /**
+   * Exception when the description of the snapshot cannot be determined, due to some other
+   * root cause.
+   * @param message description of what caused the failure
+   * @param e root cause
+   */
+  public HBaseSnapshotException(String message, Exception e) {
+    super(message, e);
+  }
+
+  /** @return description of the snapshot that failed, or {@code null} if it was not known */
+  public SnapshotDescription getSnapshotDescription() {
+    return this.description;
+  }
+}

Added: hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ReferenceRegionHFilesTask.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ReferenceRegionHFilesTask.java?rev=1445814&view=auto
==============================================================================
--- hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ReferenceRegionHFilesTask.java (added)
+++ hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ReferenceRegionHFilesTask.java Wed Feb 13 18:37:32 2013
@@ -0,0 +1,127 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.snapshot;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.util.FSUtils;
+
+/**
+ * Reference all the hfiles in a region for a snapshot.
+ * <p>
+ * Doesn't take into acccount if the hfiles are valid or not, just keeps track of what's in the
+ * region's directory.
+ */
+public class ReferenceRegionHFilesTask extends SnapshotTask {
+
+  public static final Log LOG = LogFactory.getLog(ReferenceRegionHFilesTask.class);
+  private final Path regiondir;
+  private final FileSystem fs;
+  private final PathFilter fileFilter;
+  private final Path snapshotDir;
+
+  /**
+   * Reference all the files in the given region directory
+   * @param snapshot snapshot for which to add references
+   * @param monitor to check/send error
+   * @param regionDir region directory to look for errors
+   * @param fs {@link FileSystem} where the snapshot/region live
+   * @param regionSnapshotDir directory in the snapshot to store region files
+   */
+  public ReferenceRegionHFilesTask(final SnapshotDescription snapshot,
+      ForeignExceptionDispatcher monitor, Path regionDir, final FileSystem fs, Path regionSnapshotDir) {
+    super(snapshot, monitor);
+    this.regiondir = regionDir;
+    this.fs = fs;
+
+    this.fileFilter = new PathFilter() {
+      @Override
+      public boolean accept(Path path) {
+        try {
+          return fs.isFile(path);
+        } catch (IOException e) {
+          LOG.error("Failed to reach fs to check file:" + path + ", marking as not file");
+          ReferenceRegionHFilesTask.this.snapshotFailure("Failed to reach fs to check file status",
+            e);
+          return false;
+        }
+      }
+    };
+    this.snapshotDir = regionSnapshotDir;
+  }
+
+  @Override
+  public Void call() throws IOException {
+    FileStatus[] families = FSUtils.listStatus(fs, regiondir, new FSUtils.FamilyDirFilter(fs));
+
+    // if no families, then we are done again
+    if (families == null || families.length == 0) {
+      LOG.info("No families under region directory:" + regiondir
+          + ", not attempting to add references.");
+      return null;
+    }
+
+    // snapshot directories to store the hfile reference
+    List<Path> snapshotFamilyDirs = TakeSnapshotUtils.getFamilySnapshotDirectories(snapshot,
+      snapshotDir, families);
+
+    LOG.debug("Add hfile references to snapshot directories:" + snapshotFamilyDirs);
+    for (int i = 0; i < families.length; i++) {
+      FileStatus family = families[i];
+      Path familyDir = family.getPath();
+      // get all the hfiles in the family
+      FileStatus[] hfiles = FSUtils.listStatus(fs, familyDir, fileFilter);
+
+      // if no hfiles, then we are done with this family
+      if (hfiles == null || hfiles.length == 0) {
+        LOG.debug("Not hfiles found for family: " + familyDir + ", skipping.");
+        continue;
+      }
+
+      // make the snapshot's family directory
+      Path snapshotFamilyDir = snapshotFamilyDirs.get(i);
+      fs.mkdirs(snapshotFamilyDir);
+
+      // create a reference for each hfile
+      for (FileStatus hfile : hfiles) {
+        // references are 0-length files, relying on file name.
+        Path referenceFile = new Path(snapshotFamilyDir, hfile.getPath().getName());
+        LOG.debug("Creating reference for:" + hfile.getPath() + " at " + referenceFile);
+        if (!fs.createNewFile(referenceFile)) {
+          throw new IOException("Failed to create reference file:" + referenceFile);
+        }
+      }
+    }
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Finished referencing hfiles, current region state:");
+      FSUtils.logFileSystemState(fs, regiondir, LOG);
+      LOG.debug("and the snapshot directory:");
+      FSUtils.logFileSystemState(fs, snapshotDir, LOG);
+    }
+    return null;
+  }
+}
\ No newline at end of file

Added: hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ReferenceServerWALsTask.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ReferenceServerWALsTask.java?rev=1445814&view=auto
==============================================================================
--- hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ReferenceServerWALsTask.java (added)
+++ hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ReferenceServerWALsTask.java Wed Feb 13 18:37:32 2013
@@ -0,0 +1,105 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.snapshot;
+
+import java.io.IOException;
+import java.util.Arrays;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.errorhandling.ForeignException;
+import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.util.FSUtils;
+
+/**
+ * Reference all the WAL files under a server's WAL directory.
+ * <p>
+ * References are 0-length files that carry the same name as the original log file.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class ReferenceServerWALsTask extends SnapshotTask {
+  private static final Log LOG = LogFactory.getLog(ReferenceServerWALsTask.class);
+  private final FileSystem fs;
+  private final Configuration conf;
+  /** Name of the server; derived from the last component of its log directory. */
+  private final String serverName;
+  private final Path logDir;
+
+  /**
+   * @param snapshot snapshot being run
+   * @param failureListener listener to check for errors while running the operation and to
+   *          propagate errors found while running the task
+   * @param logDir log directory for the server. Name of the directory is taken as the name of the
+   *          server
+   * @param conf {@link Configuration} to extract filesystem information
+   * @param fs filesystem where the log files are stored and should be referenced
+   */
+  public ReferenceServerWALsTask(SnapshotDescription snapshot,
+      ForeignExceptionDispatcher failureListener, final Path logDir, final Configuration conf,
+      final FileSystem fs) {
+    super(snapshot, failureListener);
+    this.fs = fs;
+    this.conf = conf;
+    this.serverName = logDir.getName();
+    this.logDir = logDir;
+  }
+
+  /**
+   * Create reference files (empty files with the same path and file name as original).
+   * @throws IOException exception from hdfs or network problems
+   * @throws ForeignException exception from an external procedure
+   */
+  @Override
+  public Void call() throws IOException, ForeignException {
+    // TODO switch to using a single file to reference all required WAL files
+
+    // Iterate through each of the log files and add a reference to it.
+    // assumes that all the files under the server's logs directory is a log
+    FileStatus[] serverLogs = FSUtils.listStatus(fs, logDir, null);
+    if (serverLogs == null) {
+      // BUG FIX: previously fell through into the loop below and threw a NullPointerException
+      LOG.info("No logs for server directory:" + logDir + ", done referencing files.");
+      return null;
+    }
+
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Adding references for WAL files:" + Arrays.toString(serverLogs));
+    }
+
+    // these paths are the same for every log file, so compute them once outside the loop
+    Path rootDir = FSUtils.getRootDir(conf);
+    Path snapshotDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(this.snapshot, rootDir);
+    Path snapshotLogDir = TakeSnapshotUtils.getSnapshotHLogsDir(snapshotDir, serverName);
+
+    for (FileStatus file : serverLogs) {
+      this.rethrowException();
+
+      // add the reference to the file. ex: hbase/.snapshots/.logs/<serverName>/<hlog>
+      // actually store the reference on disk (small file)
+      Path ref = new Path(snapshotLogDir, file.getPath().getName());
+      if (!fs.createNewFile(ref)) {
+        // createNewFile returning false is fine if the reference already exists,
+        // but a still-missing file means the reference was not made
+        if (!fs.exists(ref)) {
+          throw new IOException("Couldn't create reference for:" + file.getPath());
+        }
+      }
+      LOG.debug("Completed WAL referencing for: " + file.getPath() + " to " + ref);
+    }
+
+    LOG.debug("Successfully completed WAL referencing for ALL files");
+    return null;
+  }
+}
\ No newline at end of file

Added: hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotException.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotException.java?rev=1445814&view=auto
==============================================================================
--- hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotException.java (added)
+++ hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotException.java Wed Feb 13 18:37:32 2013
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.snapshot;
+
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+
+/**
+ * Thrown when a snapshot could not be restored due to a server-side error when restoring it.
+ */
+@SuppressWarnings("serial")
+public class RestoreSnapshotException extends HBaseSnapshotException {
+
+  /** @param msg reason the restore failed */
+  public RestoreSnapshotException(String msg) {
+    super(msg);
+  }
+
+  /**
+   * @param msg reason the restore failed
+   * @param e underlying cause of the failure
+   */
+  public RestoreSnapshotException(String msg, Exception e) {
+    super(msg, e);
+  }
+
+  /**
+   * @param msg reason the restore failed
+   * @param desc description of the snapshot being restored
+   */
+  public RestoreSnapshotException(String msg, SnapshotDescription desc) {
+    super(msg, desc);
+  }
+
+  /**
+   * @param msg reason the restore failed
+   * @param cause underlying cause of the failure
+   * @param desc description of the snapshot being restored
+   */
+  public RestoreSnapshotException(String msg, Throwable cause, SnapshotDescription desc) {
+    super(msg, cause, desc);
+  }
+}

Added: hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java?rev=1445814&view=auto
==============================================================================
--- hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java (added)
+++ hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java Wed Feb 13 18:37:32 2013
@@ -0,0 +1,424 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.snapshot;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.backup.HFileArchiver;
+import org.apache.hadoop.hbase.catalog.CatalogTracker;
+import org.apache.hadoop.hbase.catalog.MetaEditor;
+import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
+import org.apache.hadoop.hbase.io.HFileLink;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.FSVisitor;
+import org.apache.hadoop.hbase.util.ModifyRegionUtils;
+
+/**
+ * Helper to Restore/Clone a Snapshot
+ *
+ * <p>The helper assumes that a table is already created, and by calling restore()
+ * the content present in the snapshot will be restored as the new content of the table.
+ *
+ * <p>Clone from Snapshot: If the target table is empty, the restore operation
+ * is just a "clone operation", where the only operations are:
+ * <ul>
+ *  <li>for each region in the snapshot create a new region
+ *    (note that the region will have a different name, since the encoding contains the table name)
+ *  <li>for each file in the region create a new HFileLink to point to the original file.
+ *  <li>restore the logs, if any
+ * </ul>
+ *
+ * <p>Restore from Snapshot:
+ * <ul>
+ *  <li>for each region in the table verify which are available in the snapshot and which are not
+ *    <ul>
+ *    <li>if the region is not present in the snapshot, remove it.
+ *    <li>if the region is present in the snapshot
+ *      <ul>
+ *      <li>for each file in the table region verify which are available in the snapshot
+ *        <ul>
+ *          <li>if the hfile is not present in the snapshot, remove it
+ *          <li>if the hfile is present, keep it (nothing to do)
+ *        </ul>
+ *      <li>for each file in the snapshot region but not in the table
+ *        <ul>
+ *          <li>create a new HFileLink that point to the original file
+ *        </ul>
+ *      </ul>
+ *    </ul>
+ *  <li>for each region in the snapshot not present in the current table state
+ *    <ul>
+ *    <li>create a new region and for each file in the region create a new HFileLink
+ *      (This is the same as the clone operation)
+ *    </ul>
+ *  <li>restore the logs, if any
+ * </ul>
+ */
+@InterfaceAudience.Private
+public class RestoreSnapshotHelper {
+	private static final Log LOG = LogFactory.getLog(RestoreSnapshotHelper.class);
+
+  // Mapping from snapshot-region encoded name to cloned-region encoded name (filled while
+  // cloning); presumably consumed when remapping the restored WALs — TODO confirm against
+  // the WAL-restore code, which is not visible here.
+  private final Map<byte[], byte[]> regionsMap =
+        new TreeMap<byte[], byte[]>(Bytes.BYTES_COMPARATOR);
+
+  // Dispatcher checked between restore phases so external errors abort the operation.
+  private final ForeignExceptionDispatcher monitor;
+
+  // Snapshot being restored and its on-disk directory.
+  private final SnapshotDescription snapshotDesc;
+  private final Path snapshotDir;
+
+  // Target table being restored/cloned and its on-disk directory.
+  private final HTableDescriptor tableDesc;
+  private final Path tableDir;
+
+  private final CatalogTracker catalogTracker;
+  private final Configuration conf;
+  private final FileSystem fs;
+
+  /**
+   * @param conf configuration used to resolve the cluster root directory
+   * @param fs filesystem where both the snapshot and the table live
+   * @param catalogTracker tracker used when updating .META. with restored/cloned regions
+   * @param snapshotDescription description of the snapshot to restore
+   * @param snapshotDir directory containing the snapshot's files
+   * @param tableDescriptor descriptor of the (already created) table to restore into
+   * @param tableDir directory of the target table
+   * @param monitor dispatcher used to check for and propagate external errors
+   */
+  public RestoreSnapshotHelper(final Configuration conf, final FileSystem fs,
+      final CatalogTracker catalogTracker,
+      final SnapshotDescription snapshotDescription, final Path snapshotDir,
+      final HTableDescriptor tableDescriptor, final Path tableDir,
+      final ForeignExceptionDispatcher monitor)
+  {
+    this.fs = fs;
+    this.conf = conf;
+    this.catalogTracker = catalogTracker;
+    this.snapshotDesc = snapshotDescription;
+    this.snapshotDir = snapshotDir;
+    this.tableDesc = tableDescriptor;
+    this.tableDir = tableDir;
+    this.monitor = monitor;
+  }
+
+  /**
+   * Restore the table to the specified snapshot state: restore regions present in both, remove
+   * regions absent from the snapshot, clone regions present only in the snapshot, then restore
+   * the WALs.
+   * @throws IOException on filesystem errors while restoring
+   */
+  public void restore() throws IOException {
+    LOG.debug("starting restore");
+    Set<String> snapshotRegionNames = SnapshotReferenceUtil.getSnapshotRegionNames(fs, snapshotDir);
+    if (snapshotRegionNames == null) {
+      LOG.warn("Nothing to restore. Snapshot " + snapshotDesc + " looks empty");
+      return;
+    }
+
+    // Identify which region are still available and which not.
+    // NOTE: we rely upon the region name as: "table name, start key, end key"
+    List<HRegionInfo> tableRegions = getTableRegions();
+    if (tableRegions != null) {
+      monitor.rethrowException();
+      List<HRegionInfo> regionsToRestore = new LinkedList<HRegionInfo>();
+      List<HRegionInfo> regionsToRemove = new LinkedList<HRegionInfo>();
+
+      for (HRegionInfo regionInfo: tableRegions) {
+        String regionName = regionInfo.getEncodedName();
+        if (snapshotRegionNames.contains(regionName)) {
+          LOG.info("region to restore: " + regionName);
+          // BUG FIX: remove the String name — the set holds Strings, so the previous
+          // remove(regionInfo) was a no-op and restored regions were later cloned again
+          // as "regions to add"
+          snapshotRegionNames.remove(regionName);
+          regionsToRestore.add(regionInfo);
+        } else {
+          LOG.info("region to remove: " + regionName);
+          regionsToRemove.add(regionInfo);
+        }
+      }
+
+      // Restore regions using the snapshot data
+      monitor.rethrowException();
+      restoreRegions(regionsToRestore);
+
+      // Remove regions from the current table
+      monitor.rethrowException();
+      ModifyRegionUtils.deleteRegions(fs, catalogTracker, regionsToRemove);
+    }
+
+    // Regions to Add: present in the snapshot but not in the current table
+    if (snapshotRegionNames.size() > 0) {
+      List<HRegionInfo> regionsToAdd = new LinkedList<HRegionInfo>();
+
+      monitor.rethrowException();
+      for (String regionName: snapshotRegionNames) {
+        LOG.info("region to add: " + regionName);
+        Path regionDir = new Path(snapshotDir, regionName);
+        regionsToAdd.add(HRegion.loadDotRegionInfoFileContent(fs, regionDir));
+      }
+
+      // Create new regions cloning from the snapshot
+      monitor.rethrowException();
+      cloneRegions(regionsToAdd);
+    }
+
+    // Restore WALs
+    monitor.rethrowException();
+    restoreWALs();
+  }
+
+  /**
+   * Restore each of the given regions to its snapshot state.
+   * No-op when the list is null or empty.
+   */
+  private void restoreRegions(final List<HRegionInfo> regions) throws IOException {
+    if (regions == null || regions.isEmpty()) {
+      return;
+    }
+    for (HRegionInfo regionInfo : regions) {
+      restoreRegion(regionInfo);
+    }
+  }
+
+  /**
+   * Restore a region by removing files that are not in the snapshot
+   * and adding the missing ones from the snapshot.
+   * @param regionInfo region (already present in the table) to bring back to the snapshot state
+   */
+  private void restoreRegion(HRegionInfo regionInfo) throws IOException {
+    Path snapshotRegionDir = new Path(snapshotDir, regionInfo.getEncodedName());
+    // family name -> names of the hfiles the snapshot references for that family
+    Map<String, List<String>> snapshotFiles =
+                SnapshotReferenceUtil.getRegionHFileReferences(fs, snapshotRegionDir);
+
+    Path regionDir = new Path(tableDir, regionInfo.getEncodedName());
+    String tableName = tableDesc.getNameAsString();
+
+    for (Map.Entry<String, List<String>> familyEntry: snapshotFiles.entrySet()) {
+      byte[] family = Bytes.toBytes(familyEntry.getKey());
+      Path familyDir = new Path(regionDir, familyEntry.getKey());
+      Set<String> familyFiles = getTableRegionFamilyFiles(familyDir);
+
+      // Partition the snapshot's files: files also present in the table are dropped from
+      // familyFiles (nothing to do); files only in the snapshot go to hfilesToAdd. What
+      // remains in familyFiles afterwards exists only in the table and must be removed.
+      List<String> hfilesToAdd = new LinkedList<String>();
+      for (String hfileName: familyEntry.getValue()) {
+        if (familyFiles.contains(hfileName)) {
+          // HFile already present
+          familyFiles.remove(hfileName);
+        } else {
+          // HFile missing
+          hfilesToAdd.add(hfileName);
+        }
+      }
+
+      // Remove hfiles not present in the snapshot (archived rather than deleted outright)
+      for (String hfileName: familyFiles) {
+        Path hfile = new Path(familyDir, hfileName);
+        LOG.trace("Removing hfile=" + hfile + " from table=" + tableName);
+        HFileArchiver.archiveStoreFile(fs, regionInfo, conf, tableDir, family, hfile);
+      }
+
+      // Restore Missing files
+      for (String hfileName: hfilesToAdd) {
+        LOG.trace("Adding HFileLink " + hfileName + " to table=" + tableName);
+        restoreStoreFile(familyDir, regionInfo, hfileName);
+      }
+    }
+  }
+
+  /**
+   * @return The set of files in the specified family directory.
+   */
+  private Set<String> getTableRegionFamilyFiles(final Path familyDir) throws IOException {
+    Set<String> familyFiles = new HashSet<String>();
+
+    FileStatus[] hfiles = FSUtils.listStatus(fs, familyDir);
+    if (hfiles == null) return familyFiles;
+
+    for (FileStatus hfileRef: hfiles) {
+      String hfileName = hfileRef.getPath().getName();
+      familyFiles.add(hfileName);
+    }
+
+    return familyFiles;
+  }
+
+  /**
+   * Clone the specified snapshot regions into the target table. For each region create a new
+   * region (whose name is re-encoded for the new table) and an HFileLink for each hfile,
+   * then register the cloned regions in .META.
+   * @param regions snapshot region info to clone; no-op if null or empty
+   */
+  private void cloneRegions(final List<HRegionInfo> regions) throws IOException {
+    if (regions == null || regions.size() == 0) return;
+
+    final Map<String, HRegionInfo> snapshotRegions =
+      new HashMap<String, HRegionInfo>(regions.size());
+
+    // clone region info (change embedded tableName with the new one)
+    HRegionInfo[] clonedRegionsInfo = new HRegionInfo[regions.size()];
+    for (int i = 0; i < clonedRegionsInfo.length; ++i) {
+      // clone the region info from the snapshot region info
+      HRegionInfo snapshotRegionInfo = regions.get(i);
+      clonedRegionsInfo[i] = cloneRegionInfo(snapshotRegionInfo);
+
+      // add the region name mapping between snapshot and cloned
+      String snapshotRegionName = snapshotRegionInfo.getEncodedName();
+      String clonedRegionName = clonedRegionsInfo[i].getEncodedName();
+      regionsMap.put(Bytes.toBytes(snapshotRegionName), Bytes.toBytes(clonedRegionName));
+      LOG.info("clone region=" + snapshotRegionName + " as " + clonedRegionName);
+
+      // Add mapping between cloned region name and snapshot region info
+      snapshotRegions.put(clonedRegionName, snapshotRegionInfo);
+    }
+
+    // create the regions on disk
+    List<HRegionInfo> clonedRegions = ModifyRegionUtils.createRegions(conf, FSUtils.getRootDir(conf),
+      tableDesc, clonedRegionsInfo, catalogTracker, new ModifyRegionUtils.RegionFillTask() {
+        @Override
+        public void fillRegion(final HRegion region) throws IOException {
+          cloneRegion(region, snapshotRegions.get(region.getRegionInfo().getEncodedName()));
+        }
+      });
+
+    // add regions to .META.
+    // (removed a redundant "regions != null && regions.size() > 0" re-check: the guard at the
+    // top of the method already guarantees regions is non-empty here)
+    MetaEditor.addRegionsToMeta(catalogTracker, clonedRegions);
+  }
+
+  /**
+   * Clone region directory content from the snapshot info.
+   *
+   * Each region is encoded with the table name, so the cloned region will have
+   * a different region name.
+   *
+   * Instead of copying the hfiles a HFileLink is created.
+   *
+   * @param region {@link HRegion} cloned
+   * @param snapshotRegionInfo region info of the snapshot region being cloned
+   */
+  private void cloneRegion(final HRegion region, final HRegionInfo snapshotRegionInfo)
+      throws IOException {
+    final Path snapshotRegionDir = new Path(snapshotDir, snapshotRegionInfo.getEncodedName());
+    final Path regionDir = new Path(tableDir, region.getRegionInfo().getEncodedName());
+    final String tableName = tableDesc.getNameAsString();
+    // Walk every store file referenced by the snapshot region and materialize
+    // it in the cloned region's family dir as a link rather than a copy.
+    SnapshotReferenceUtil.visitRegionStoreFiles(fs, snapshotRegionDir,
+      new FSVisitor.StoreFileVisitor() {
+        public void storeFile (final String region, final String family, final String hfile)
+            throws IOException {
+          LOG.info("Adding HFileLink " + hfile + " to table=" + tableName);
+          Path familyDir = new Path(regionDir, family);
+          restoreStoreFile(familyDir, snapshotRegionInfo, hfile);
+        }
+    });
+  }
+
+  /**
+   * Create a new {@link HFileLink} to reference the store file.
+   *
+   * @param familyDir destination directory for the store file
+   * @param regionInfo destination region info for the table
+   * @param hfileName store file name (can be a Reference, HFileLink or simple HFile)
+   */
+  private void restoreStoreFile(final Path familyDir, final HRegionInfo regionInfo,
+      final String hfileName) throws IOException {
+    if (HFileLink.isHFileLink(hfileName)) {
+      // The snapshot file is itself a link: create a new link to the link's target.
+      HFileLink.createFromHFileLink(conf, fs, familyDir, hfileName);
+    } else {
+      HFileLink.create(conf, fs, familyDir, regionInfo, hfileName);
+    }
+  }
+
+  /**
+   * Create a new {@link HRegionInfo} from the snapshot region info.
+   * Keep the same startKey, endKey, regionId and split information but change
+   * the table name.
+   *
+   * @param snapshotRegionInfo Info for region to clone.
+   * @return the new HRegion instance
+   */
+  public HRegionInfo cloneRegionInfo(final HRegionInfo snapshotRegionInfo) {
+    // The table name is embedded in the region name, so the clone gets a new
+    // encoded region name as a side effect of this constructor.
+    return new HRegionInfo(tableDesc.getName(),
+                      snapshotRegionInfo.getStartKey(), snapshotRegionInfo.getEndKey(),
+                      snapshotRegionInfo.isSplit(), snapshotRegionInfo.getRegionId());
+  }
+
+  /**
+   * Restore snapshot WALs.
+   *
+   * A global snapshot keeps a reference to the region server logs present
+   * during the snapshot.
+   * (/hbase/.snapshot/snapshotName/.logs/hostName/logName)
+   *
+   * Since each log contains different tables data, logs must be split to
+   * extract the table that we are interested in.
+   */
+  private void restoreWALs() throws IOException {
+    final SnapshotLogSplitter logSplitter = new SnapshotLogSplitter(conf, fs, tableDir,
+                                Bytes.toBytes(snapshotDesc.getTable()), regionsMap);
+    try {
+      // Recover.Edits: split each recovered-edits file referenced by the snapshot
+      SnapshotReferenceUtil.visitRecoveredEdits(fs, snapshotDir,
+          new FSVisitor.RecoveredEditsVisitor() {
+        public void recoveredEdits (final String region, final String logfile) throws IOException {
+          Path path = SnapshotReferenceUtil.getRecoveredEdits(snapshotDir, region, logfile);
+          logSplitter.splitRecoveredEdit(path);
+        }
+      });
+
+      // Region Server Logs: split each server log referenced by the snapshot
+      SnapshotReferenceUtil.visitLogFiles(fs, snapshotDir, new FSVisitor.LogFileVisitor() {
+        public void logFile (final String server, final String logfile) throws IOException {
+          logSplitter.splitLog(server, logfile);
+        }
+      });
+    } finally {
+      // close (and finalize) all the per-region writers, even on failure
+      logSplitter.close();
+    }
+  }
+
+  /**
+   * @return the set of the regions contained in the table, or null if the
+   *         table directory contains no region directories
+   */
+  private List<HRegionInfo> getTableRegions() throws IOException {
+    LOG.debug("get table regions: " + tableDir);
+    FileStatus[] regionDirs = FSUtils.listStatus(fs, tableDir, new FSUtils.RegionDirFilter(fs));
+    // NOTE(review): returns null (not an empty list) when there are no regions;
+    // callers must be prepared to handle that.
+    if (regionDirs == null) return null;
+
+    List<HRegionInfo> regions = new LinkedList<HRegionInfo>();
+    for (FileStatus regionDir: regionDirs) {
+      HRegionInfo hri = HRegion.loadDotRegionInfoFileContent(fs, regionDir.getPath());
+      regions.add(hri);
+    }
+    LOG.debug("found " + regions.size() + " regions for table=" + tableDesc.getNameAsString());
+    return regions;
+  }
+
+  /**
+   * Create a new table descriptor cloning the snapshot table schema.
+   *
+   * @param snapshotTableDescriptor descriptor of the snapshotted table
+   * @param tableName name of the new (cloned) table
+   * @return cloned table descriptor
+   * @throws IOException
+   */
+  public static HTableDescriptor cloneTableSchema(final HTableDescriptor snapshotTableDescriptor,
+      final byte[] tableName) throws IOException {
+    // Copy every column family of the snapshotted table into a descriptor
+    // carrying the new table name.
+    HTableDescriptor htd = new HTableDescriptor(tableName);
+    for (HColumnDescriptor hcd: snapshotTableDescriptor.getColumnFamilies()) {
+      htd.addFamily(hcd);
+    }
+    return htd;
+  }
+}

Added: hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotCreationException.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotCreationException.java?rev=1445814&view=auto
==============================================================================
--- hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotCreationException.java (added)
+++ hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotCreationException.java Wed Feb 13 18:37:32 2013
@@ -0,0 +1,54 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.snapshot;
+
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+
+/**
+ * Thrown when a snapshot could not be created due to a server-side error when taking the snapshot.
+ */
+// NOTE(review): sibling snapshot exceptions in this package carry
+// @InterfaceAudience/@InterfaceStability annotations; consider adding them here too.
+@SuppressWarnings("serial")
+public class SnapshotCreationException extends HBaseSnapshotException {
+
+  /**
+   * Used internally by the RPC engine to pass the exception back to the client.
+   * @param msg error message to pass back
+   */
+  public SnapshotCreationException(String msg) {
+    super(msg);
+  }
+
+  /**
+   * Failure to create the specified snapshot.
+   * @param msg reason why the snapshot couldn't be completed
+   * @param desc description of the snapshot attempted
+   */
+  public SnapshotCreationException(String msg, SnapshotDescription desc) {
+    super(msg, desc);
+  }
+
+  /**
+   * Failure to create the specified snapshot due to an external cause.
+   * @param msg reason why the snapshot couldn't be completed
+   * @param cause root cause of the failure
+   * @param desc description of the snapshot attempted
+   */
+  public SnapshotCreationException(String msg, Throwable cause, SnapshotDescription desc) {
+    super(msg, cause, desc);
+  }
+}

Modified: hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java?rev=1445814&r1=1445813&r2=1445814&view=diff
==============================================================================
--- hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java (original)
+++ hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java Wed Feb 13 18:37:32 2013
@@ -31,8 +31,6 @@ import org.apache.hadoop.fs.permission.F
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
-import org.apache.hadoop.hbase.snapshot.exception.CorruptedSnapshotException;
-import org.apache.hadoop.hbase.snapshot.exception.SnapshotCreationException;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSUtils;

Added: hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDoesNotExistException.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDoesNotExistException.java?rev=1445814&view=auto
==============================================================================
--- hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDoesNotExistException.java (added)
+++ hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDoesNotExistException.java Wed Feb 13 18:37:32 2013
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.snapshot;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+
+
+/**
+ * Thrown when the server is looking for a snapshot but can't find the snapshot on the filesystem
+ */
+@SuppressWarnings("serial")
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class SnapshotDoesNotExistException extends HBaseSnapshotException {
+  /**
+   * Failure when the requested snapshot cannot be located.
+   * @param msg full description of the failure
+   */
+  public SnapshotDoesNotExistException(String msg) {
+    super(msg);
+  }
+
+  /**
+   * Failure with a standard message built from the snapshot description.
+   * @param desc expected snapshot to find
+   */
+  public SnapshotDoesNotExistException(SnapshotDescription desc) {
+    super("Snapshot '" + desc.getName() +"' doesn't exist on the filesystem", desc);
+  }
+}

Added: hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotExistsException.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotExistsException.java?rev=1445814&view=auto
==============================================================================
--- hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotExistsException.java (added)
+++ hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotExistsException.java Wed Feb 13 18:37:32 2013
@@ -0,0 +1,40 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.snapshot;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+
+/**
+ * Thrown when a snapshot exists, but should not.
+ */
+@SuppressWarnings("serial")
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class SnapshotExistsException extends HBaseSnapshotException {
+
+  /**
+   * Failure due to the snapshot already existing.
+   * @param msg full description of the failure
+   * @param desc snapshot that was attempted
+   */
+  public SnapshotExistsException(String msg, SnapshotDescription desc) {
+    super(msg, desc);
+  }
+}
\ No newline at end of file

Added: hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotLogSplitter.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotLogSplitter.java?rev=1445814&view=auto
==============================================================================
--- hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotLogSplitter.java (added)
+++ hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotLogSplitter.java Wed Feb 13 18:37:32 2013
@@ -0,0 +1,198 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.snapshot;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.Map;
+import java.util.TreeMap;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.io.HLogLink;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.wal.HLog;
+import org.apache.hadoop.hbase.regionserver.wal.HLogFactory;
+import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
+import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * If the snapshot has references to one or more log files,
+ * those must be split (each log contains multiple tables and regions)
+ * and must be placed in the region/recovered.edits folder.
+ * (recovered.edits files will be played on region startup)
+ *
+ * In case of Restore: the log can just be split in the recovered.edits folder.
+ * In case of Clone: each entry in the log must be modified to use the new region name.
+ * (region names are encoded with: tableName, startKey, regionIdTimeStamp)
+ *
+ * We can't use the normal split code, because the HLogKey contains the
+ * table name and the region name, and in case of "clone from snapshot"
+ * region name and table name will be different and must be replaced in
+ * the recovered.edits.
+ */
+@InterfaceAudience.Private
+class SnapshotLogSplitter implements Closeable {
+  static final Log LOG = LogFactory.getLog(SnapshotLogSplitter.class);
+
+  // Per-region recovered-edits writer: writes to a .temp file and renames it
+  // to its final sequence-id based name on close().
+  private final class LogWriter implements Closeable {
+    private HLog.Writer writer;
+    private Path logFile;
+    // highest log sequence id appended so far; used to name the final file
+    private long seqId;
+
+    public LogWriter(final Configuration conf, final FileSystem fs,
+        final Path logDir, long seqId) throws IOException {
+      logFile = new Path(logDir, logFileName(seqId, true));
+      this.writer = HLogFactory.createWriter(fs, logFile, conf);
+      this.seqId = seqId;
+    }
+
+    public void close() throws IOException {
+      writer.close();
+
+      Path finalFile = new Path(logFile.getParent(), logFileName(seqId, false));
+      LOG.debug("LogWriter tmpLogFile=" + logFile + " -> logFile=" + finalFile);
+      // NOTE(review): the boolean result of rename() is ignored; a failed rename
+      // would silently leave the .temp file behind.
+      fs.rename(logFile, finalFile);
+    }
+
+    public void append(final HLog.Entry entry) throws IOException {
+      writer.append(entry);
+      // track the max sequence id seen, so close() names the file after it
+      if (seqId < entry.getKey().getLogSeqNum()) {
+        seqId = entry.getKey().getLogSeqNum();
+      }
+    }
+
+    private String logFileName(long seqId, boolean temp) {
+      String fileName = String.format("%019d", seqId);
+      if (temp) fileName += HLog.RECOVERED_LOG_TMPFILE_SUFFIX;
+      return fileName;
+    }
+  }
+
+  // one recovered-edits writer per (new) encoded region name
+  private final Map<byte[], LogWriter> regionLogWriters =
+      new TreeMap<byte[], LogWriter>(Bytes.BYTES_COMPARATOR);
+
+  private final Map<byte[], byte[]> regionsMap;
+  private final Configuration conf;
+  private final byte[] snapshotTableName;
+  private final byte[] tableName;
+  private final Path tableDir;
+  private final FileSystem fs;
+
+  /**
+   * @param conf configuration to use
+   * @param fs filesystem holding the logs and the destination table
+   * @param tableDir destination table directory (its name is taken as the new table name)
+   * @param snapshotTableName snapshot (source) table name
+   * @param regionsMap maps original region names to the new ones.
+   */
+  public SnapshotLogSplitter(final Configuration conf, final FileSystem fs,
+      final Path tableDir, final byte[] snapshotTableName,
+      final Map<byte[], byte[]> regionsMap) {
+    this.regionsMap = regionsMap;
+    this.snapshotTableName = snapshotTableName;
+    this.tableName = Bytes.toBytes(tableDir.getName());
+    this.tableDir = tableDir;
+    this.conf = conf;
+    this.fs = fs;
+  }
+
+  public void close() throws IOException {
+    // NOTE(review): if one writer.close() throws, the remaining writers are
+    // never closed (and never renamed from their .temp names).
+    for (LogWriter writer: regionLogWriters.values()) {
+      writer.close();
+    }
+  }
+
+  public void splitLog(final String serverName, final String logfile) throws IOException {
+    LOG.debug("Restore log=" + logfile + " server=" + serverName +
+              " for snapshotTable=" + Bytes.toString(snapshotTableName) +
+              " to table=" + Bytes.toString(tableName));
+    splitLog(new HLogLink(conf, serverName, logfile).getAvailablePath(fs));
+  }
+
+  public void splitRecoveredEdit(final Path editPath) throws IOException {
+    LOG.debug("Restore recover.edits=" + editPath +
+              " for snapshotTable=" + Bytes.toString(snapshotTableName) +
+              " to table=" + Bytes.toString(tableName));
+    splitLog(editPath);
+  }
+
+  /**
+   * Split the snapshot HLog reference into regions recovered.edits.
+   *
+   * The HLogKey contains the table name and the region name,
+   * and they must be changed to the restored table names.
+   *
+   * @param logPath Snapshot HLog reference path
+   */
+  public void splitLog(final Path logPath) throws IOException {
+    HLog.Reader log = HLogFactory.createReader(fs, logPath, conf);
+    try {
+      HLog.Entry entry;
+      LogWriter writer = null;
+      byte[] regionName = null;
+      byte[] newRegionName = null;
+      while ((entry = log.next()) != null) {
+        HLogKey key = entry.getKey();
+
+        // We're interested only in the snapshot table that we're restoring
+        if (!Bytes.equals(key.getTablename(), snapshotTableName)) continue;
+
+        // Writer for region: switch only when the entry's region changes.
+        if (!Bytes.equals(regionName, key.getEncodedRegionName())) {
+          regionName = key.getEncodedRegionName().clone();
+
+          // Get the new region name in case of clone, or use the original one
+          newRegionName = regionsMap.get(regionName);
+          if (newRegionName == null) newRegionName = regionName;
+
+          writer = getOrCreateWriter(newRegionName, key.getLogSeqNum());
+          LOG.debug("+ regionName=" + Bytes.toString(regionName));
+        }
+
+        // Append Entry, rewriting the key with the restored region/table names
+        key = new HLogKey(newRegionName, tableName,
+                          key.getLogSeqNum(), key.getWriteTime(), key.getClusterId());
+        writer.append(new HLog.Entry(key, entry.getEdit()));
+      }
+    } catch (IOException e) {
+      // NOTE(review): read/append failures are only logged, not rethrown — the
+      // split is best-effort and the caller cannot observe a partial split.
+      LOG.warn("Something wrong during the log split", e);
+    } finally {
+      log.close();
+    }
+  }
+
+  /**
+   * Create a LogWriter for specified region if not already created.
+   */
+  private LogWriter getOrCreateWriter(final byte[] regionName, long seqId) throws IOException {
+    LogWriter writer = regionLogWriters.get(regionName);
+    if (writer == null) {
+      Path regionDir = HRegion.getRegionDir(tableDir, Bytes.toString(regionName));
+      Path dir = HLogUtil.getRegionDirRecoveredEditsDir(regionDir);
+      fs.mkdirs(dir);
+
+      writer = new LogWriter(conf, fs, dir, seqId);
+      regionLogWriters.put(regionName, writer);
+    }
+    return(writer);
+  }
+}

Added: hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotTask.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotTask.java?rev=1445814&view=auto
==============================================================================
--- hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotTask.java (added)
+++ hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotTask.java Wed Feb 13 18:37:32 2013
@@ -0,0 +1,65 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.snapshot;
+
+import java.util.concurrent.Callable;
+
+import org.apache.hadoop.hbase.errorhandling.ForeignException;
+import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
+import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+
+/**
+ * General snapshot operation taken on a regionserver
+ */
+public abstract class SnapshotTask implements ForeignExceptionSnare, Callable<Void>{
+
+  protected final SnapshotDescription snapshot;
+  protected final ForeignExceptionDispatcher errorMonitor;
+
+  /**
+   * @param snapshot Description of the snapshot we are going to operate on
+   * @param monitor listener interested in failures to the snapshot caused by this operation
+   */
+  public SnapshotTask(SnapshotDescription snapshot, ForeignExceptionDispatcher monitor) {
+    // NOTE(review): asserts are no-ops unless the JVM runs with -ea; explicit
+    // null checks would enforce these preconditions in production too.
+    assert monitor != null : "ForeignExceptionDispatcher must not be null!";
+    assert snapshot != null : "SnapshotDescription must not be null!";
+    this.snapshot = snapshot;
+    this.errorMonitor = monitor;
+  }
+
+  /**
+   * Wrap the given failure in a {@link ForeignException} and dispatch it to the
+   * error monitor so other snapshot participants learn about it.
+   * @param message description of the failure
+   * @param e cause of the failure
+   */
+  public void snapshotFailure(String message, Exception e) {
+    ForeignException ee = new ForeignException(message, e);
+    errorMonitor.receive(ee);
+  }
+
+  @Override
+  public void rethrowException() throws ForeignException {
+    this.errorMonitor.rethrowException();
+  }
+
+  @Override
+  public boolean hasException() {
+    return this.errorMonitor.hasException();
+  }
+
+  @Override
+  public ForeignException getException() {
+    return this.errorMonitor.getException();
+  }
+}

Added: hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/TableInfoCopyTask.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/TableInfoCopyTask.java?rev=1445814&view=auto
==============================================================================
--- hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/TableInfoCopyTask.java (added)
+++ hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/TableInfoCopyTask.java Wed Feb 13 18:37:32 2013
@@ -0,0 +1,72 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.snapshot;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSTableDescriptors;
+
+/**
+ * Copy the table info into the snapshot directory
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class TableInfoCopyTask extends SnapshotTask {
+
+  public static final Log LOG = LogFactory.getLog(TableInfoCopyTask.class);
+  private final FileSystem fs;
+  private final Path rootDir;
+
+  /**
+   * Copy the table info for the given table into the snapshot
+   * @param monitor listen for errors while running the snapshot
+   * @param snapshot snapshot for which we are copying the table info
+   * @param fs {@link FileSystem} where the tableinfo is stored (and where the copy will be written)
+   * @param rootDir root of the {@link FileSystem} where the tableinfo is stored
+   */
+  public TableInfoCopyTask(ForeignExceptionDispatcher monitor,
+      SnapshotDescription snapshot, FileSystem fs, Path rootDir) {
+    super(snapshot, monitor);
+    this.rootDir = rootDir;
+    this.fs = fs;
+  }
+
+  /**
+   * Copy the snapshotted table's descriptor into the snapshot working directory.
+   * Aborts early (via rethrowException) if another participant already failed.
+   * @return null on success
+   */
+  @Override
+  public Void call() throws Exception {
+    LOG.debug("Running table info copy.");
+    // bail out before doing any work if the snapshot already failed elsewhere
+    this.rethrowException();
+    LOG.debug("Attempting to copy table info for snapshot:" + this.snapshot);
+    // get the HTable descriptor
+    HTableDescriptor orig = FSTableDescriptors.getTableDescriptor(fs, rootDir,
+      Bytes.toBytes(this.snapshot.getTable()));
+    // re-check for failure between the read and the write
+    this.rethrowException();
+    // write a copy of descriptor to the snapshot directory
+    Path snapshotDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir);
+    FSTableDescriptors.createTableDescriptorForTableDirectory(fs, snapshotDir, orig, false);
+    LOG.debug("Finished copying tableinfo.");
+    return null;
+  }
+}
\ No newline at end of file

Added: hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/TablePartiallyOpenException.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/TablePartiallyOpenException.java?rev=1445814&view=auto
==============================================================================
--- hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/TablePartiallyOpenException.java (added)
+++ hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/TablePartiallyOpenException.java Wed Feb 13 18:37:32 2013
@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.snapshot;
+
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * Thrown if a table should be online/offline but is partially open
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class TablePartiallyOpenException extends IOException {
+  private static final long serialVersionUID = 3571982660065058361L;
+
+  public TablePartiallyOpenException() {
+    super();
+  }
+
+  /**
+   * @param s message
+   */
+  public TablePartiallyOpenException(String s) {
+    super(s);
+  }
+
+  /**
+   * @param tableName Name of table that is partially open
+   */
+  public TablePartiallyOpenException(byte[] tableName) {
+    this(Bytes.toString(tableName));
+  }
+}

Added: hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/TakeSnapshotUtils.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/TakeSnapshotUtils.java?rev=1445814&view=auto
==============================================================================
--- hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/TakeSnapshotUtils.java (added)
+++ hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/TakeSnapshotUtils.java Wed Feb 13 18:37:32 2013
@@ -0,0 +1,323 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.snapshot;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map.Entry;
+import java.util.Set;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.errorhandling.ForeignExceptionListener;
+import org.apache.hadoop.hbase.errorhandling.TimeoutExceptionInjector;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.HStore;
+import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
+
+import com.google.common.collect.HashMultimap;
+import com.google.common.collect.Multimap;
+
+/**
+ * Utility methods used while taking a snapshot.
+ */
+public class TakeSnapshotUtils {
+
+  private static final Log LOG = LogFactory.getLog(TakeSnapshotUtils.class);
+
+  private TakeSnapshotUtils() {
+    // private constructor for util class
+  }
+
+  /**
+   * Get the per-region snapshot description location.
+   * <p>
+   * Under the per-snapshot directory, specific files per-region are kept in a similar layout as per
+   * the current directory layout.
+   * @param desc description of the snapshot
+   * @param rootDir root directory for the hbase installation
+   * @param regionName encoded name of the region (see {@link HRegionInfo#encodeRegionName(byte[])})
+   * @return path to the per-region directory for the snapshot
+   */
+  public static Path getRegionSnapshotDirectory(SnapshotDescription desc, Path rootDir,
+      String regionName) {
+    Path snapshotDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(desc, rootDir);
+    return HRegion.getRegionDir(snapshotDir, regionName);
+  }
+
+  /**
+   * Get the home directory for store-level snapshot files.
+   * <p>
+   * Specific files per store are kept in a similar layout as per the current directory layout.
+   * @param regionDir snapshot directory for the parent region, <b>not</b> the standard region
+   *          directory. See {@link #getRegionSnapshotDirectory(SnapshotDescription, Path, String)}
+   * @param family name of the store to snapshot
+   * @return path to the snapshot home directory for the store/family
+   */
+  public static Path getStoreSnapshotDirectory(Path regionDir, String family) {
+    return HStore.getStoreHomedir(regionDir, Bytes.toBytes(family));
+  }
+
+  /**
+   * Get the snapshot directory for each family to be added to the snapshot
+   * @param snapshot description of the snapshot being taken
+   * @param snapshotRegionDir directory in the snapshot where the region directory information
+   *          should be stored
+   * @param families families to be added (can be null)
+   * @return paths to the snapshot directory for each family, in the same order as the families
+   *         passed in; empty list when {@code families} is null or empty
+   */
+  public static List<Path> getFamilySnapshotDirectories(SnapshotDescription snapshot,
+      Path snapshotRegionDir, FileStatus[] families) {
+    if (families == null || families.length == 0) return Collections.emptyList();
+
+    List<Path> familyDirs = new ArrayList<Path>(families.length);
+    for (FileStatus family : families) {
+      // build the reference directory name from the family directory's name
+      familyDirs.add(getStoreSnapshotDirectory(snapshotRegionDir, family.getPath().getName()));
+    }
+    return familyDirs;
+  }
+
+  /**
+   * Create a snapshot timer for the master which notifies the monitor when an error occurs
+   * @param snapshot snapshot to monitor
+   * @param conf configuration to use when getting the max snapshot life
+   * @param monitor monitor to notify when the snapshot life expires
+   * @return the timer to use update to signal the start and end of the snapshot
+   */
+  public static TimeoutExceptionInjector getMasterTimerAndBindToMonitor(SnapshotDescription snapshot,
+      Configuration conf, ForeignExceptionListener monitor) {
+    long maxTime = SnapshotDescriptionUtils.getMaxMasterTimeout(conf, snapshot.getType(),
+      SnapshotDescriptionUtils.DEFAULT_MAX_WAIT_TIME);
+    return new TimeoutExceptionInjector(monitor, maxTime);
+  }
+
+  /**
+   * Verify that all the expected logs got referenced
+   * @param fs filesystem where the logs live
+   * @param logsDir original logs directory
+   * @param serverNames names of the servers that are involved in the snapshot
+   * @param snapshot description of the snapshot being taken
+   * @param snapshotLogDir directory for logs in the snapshot
+   * @throws IOException if listing the directories fails, or as a
+   *           {@link CorruptedSnapshotException} when the snapshot's logs don't match the originals
+   */
+  public static void verifyAllLogsGotReferenced(FileSystem fs, Path logsDir,
+      Set<String> serverNames, SnapshotDescription snapshot, Path snapshotLogDir)
+      throws IOException {
+    assertTrue(snapshot, "Logs directory doesn't exist in snapshot", fs.exists(logsDir));
+    // for each of the server log dirs, make sure it matches the main directory
+    Multimap<String, String> snapshotLogs = getMapOfServersAndLogs(fs, snapshotLogDir, serverNames);
+    Multimap<String, String> realLogs = getMapOfServersAndLogs(fs, logsDir, serverNames);
+    if (realLogs == null) {
+      // no original logs, so the snapshot must not have referenced any either.
+      // Return early - there is nothing further to compare (previously the code
+      // fell through and dereferenced the null realLogs, causing an NPE).
+      assertNull(snapshot, "Snapshotted server logs that don't exist", snapshotLogs);
+      return;
+    }
+    assertNotNull(snapshot, "No server logs added to snapshot", snapshotLogs);
+
+    // check the number of servers
+    Set<Entry<String, Collection<String>>> serverEntries = realLogs.asMap().entrySet();
+    Set<Entry<String, Collection<String>>> snapshotEntries = snapshotLogs.asMap().entrySet();
+    assertEquals(snapshot, "Not the same number of snapshot and original server logs directories",
+      serverEntries.size(), snapshotEntries.size());
+
+    // verify we snapshotted each of the log files
+    for (Entry<String, Collection<String>> serverLogs : serverEntries) {
+      // if the server is not in the snapshot, skip checking its logs
+      if (!serverNames.contains(serverLogs.getKey())) continue;
+      Collection<String> snapshotServerLogs = snapshotLogs.get(serverLogs.getKey());
+      assertNotNull(snapshot, "Snapshots missing logs for server:" + serverLogs.getKey(),
+        snapshotServerLogs);
+
+      // check each of the log files
+      assertEquals(snapshot,
+        "Didn't reference all the log files for server:" + serverLogs.getKey(), serverLogs
+            .getValue().size(), snapshotServerLogs.size());
+      for (String log : serverLogs.getValue()) {
+        assertTrue(snapshot, "Snapshot logs didn't include " + log,
+          snapshotServerLogs.contains(log));
+      }
+    }
+  }
+
+  /**
+   * Verify that a snapshot region's recovered.edits files match the originals at the surface
+   * level (file names and lengths).
+   * @param fs filesystem on which the snapshot had been taken
+   * @param rootDir full path to the root hbase directory
+   * @param regionInfo info for the region
+   * @param snapshot description of the snapshot that was taken
+   * @throws IOException if there is an unexpected error talking to the filesystem, or as a
+   *           {@link CorruptedSnapshotException} when the edits don't match
+   */
+  public static void verifyRecoveredEdits(FileSystem fs, Path rootDir, HRegionInfo regionInfo,
+      SnapshotDescription snapshot) throws IOException {
+    Path regionDir = HRegion.getRegionDir(rootDir, regionInfo);
+    Path editsDir = HLogUtil.getRegionDirRecoveredEditsDir(regionDir);
+    Path snapshotRegionDir = TakeSnapshotUtils.getRegionSnapshotDirectory(snapshot, rootDir,
+      regionInfo.getEncodedName());
+    Path snapshotEditsDir = HLogUtil.getRegionDirRecoveredEditsDir(snapshotRegionDir);
+
+    FileStatus[] edits = FSUtils.listStatus(fs, editsDir);
+    FileStatus[] snapshotEdits = FSUtils.listStatus(fs, snapshotEditsDir);
+    if (edits == null) {
+      assertNull(snapshot, "Snapshot has edits but table doesn't", snapshotEdits);
+      return;
+    }
+
+    assertNotNull(snapshot, "Table has edits, but snapshot doesn't", snapshotEdits);
+
+    // check each of the files
+    assertEquals(snapshot, "Not same number of edits in snapshot as table", edits.length,
+      snapshotEdits.length);
+
+    // make sure we have a file with the same name as the original
+    // it would be really expensive to verify the content matches the original
+    for (FileStatus edit : edits) {
+      boolean found = false;
+      for (FileStatus sEdit : snapshotEdits) {
+        // the files live in different directories, so compare by name only (comparing the
+        // full paths could never match and previously made this check always fail)
+        if (sEdit.getPath().getName().equals(edit.getPath().getName())) {
+          assertEquals(snapshot, "Snapshot file " + sEdit.getPath()
+              + " length not equal to the original: " + edit.getPath(), edit.getLen(),
+            sEdit.getLen());
+          found = true;
+          break;
+        }
+      }
+      // only fail when no snapshot edit matched this original edit file
+      assertTrue(snapshot, "No edit in snapshot with name:" + edit.getPath(), found);
+    }
+  }
+
+  /** Throw a {@link CorruptedSnapshotException} if the given object is not null. */
+  private static void assertNull(SnapshotDescription snapshot, String msg, Object isNull)
+      throws CorruptedSnapshotException {
+    if (isNull != null) {
+      throw new CorruptedSnapshotException(msg + ", Expected " + isNull + " to be null.", snapshot);
+    }
+  }
+
+  /** Throw a {@link CorruptedSnapshotException} if the given object is null. */
+  private static void assertNotNull(SnapshotDescription snapshot, String msg, Object notNull)
+      throws CorruptedSnapshotException {
+    if (notNull == null) {
+      throw new CorruptedSnapshotException(msg + ", Expected object to not be null, but was null.",
+          snapshot);
+    }
+  }
+
+  /** Throw a {@link CorruptedSnapshotException} if the given condition is false. */
+  private static void assertTrue(SnapshotDescription snapshot, String msg, boolean isTrue)
+      throws CorruptedSnapshotException {
+    if (!isTrue) {
+      throw new CorruptedSnapshotException(msg + ", Expected true, but was false", snapshot);
+    }
+  }
+
+  /**
+   * Assert that the expected matches the gotten amount
+   * @param msg message to add to the exception
+   * @param expected the expected count
+   * @param gotten the actual count
+   * @throws CorruptedSnapshotException thrown if the two elements don't match
+   */
+  private static void assertEquals(SnapshotDescription snapshot, String msg, int expected,
+      int gotten) throws CorruptedSnapshotException {
+    if (expected != gotten) {
+      throw new CorruptedSnapshotException(msg + ". Expected:" + expected + ", got:" + gotten,
+          snapshot);
+    }
+  }
+
+  /**
+   * Assert that the expected matches the gotten amount
+   * @param msg message to add to the exception
+   * @param expected the expected value
+   * @param gotten the actual value
+   * @throws CorruptedSnapshotException thrown if the two elements don't match
+   */
+  private static void assertEquals(SnapshotDescription snapshot, String msg, long expected,
+      long gotten) throws CorruptedSnapshotException {
+    if (expected != gotten) {
+      throw new CorruptedSnapshotException(msg + ". Expected:" + expected + ", got:" + gotten,
+          snapshot);
+    }
+  }
+
+  /**
+   * @param logdir directory containing one subdirectory per server
+   * @param toInclude list of servers to include. If empty or null, returns all servers
+   * @return maps of servers to all their log files. If there is no log directory, returns
+   *         <tt>null</tt>
+   */
+  private static Multimap<String, String> getMapOfServersAndLogs(FileSystem fs, Path logdir,
+      Collection<String> toInclude) throws IOException {
+    // create a path filter based on the passed directories to include
+    PathFilter filter = toInclude == null || toInclude.size() == 0 ? null
+        : new MatchesDirectoryNames(toInclude);
+
+    // get all the expected directories
+    FileStatus[] serverLogDirs = FSUtils.listStatus(fs, logdir, filter);
+    if (serverLogDirs == null) return null;
+
+    // map those into a multimap of servername -> [log files]
+    Multimap<String, String> map = HashMultimap.create();
+    for (FileStatus server : serverLogDirs) {
+      FileStatus[] serverLogs = FSUtils.listStatus(fs, server.getPath(), null);
+      if (serverLogs == null) continue;
+      for (FileStatus log : serverLogs) {
+        map.put(server.getPath().getName(), log.getPath().getName());
+      }
+    }
+    return map;
+  }
+
+  /**
+   * Path filter that only accepts paths that have a {@link Path#getName()} that is contained
+   * in the specified collection.
+   */
+  private static class MatchesDirectoryNames implements PathFilter {
+
+    Collection<String> paths;
+
+    public MatchesDirectoryNames(Collection<String> dirNames) {
+      this.paths = dirNames;
+    }
+
+    @Override
+    public boolean accept(Path path) {
+      return paths.contains(path.getName());
+    }
+  }
+
+  /**
+   * Get the log directory for a specific snapshot
+   * @param snapshotDir directory where the specific snapshot will be stored
+   * @param serverName name of the parent regionserver for the log files
+   * @return path to the log home directory for the archive files.
+   */
+  public static Path getSnapshotHLogsDir(Path snapshotDir, String serverName) {
+    return new Path(snapshotDir, HLogUtil.getHLogDirectoryName(serverName));
+  }
+}
\ No newline at end of file

Added: hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/UnexpectedSnapshotException.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/UnexpectedSnapshotException.java?rev=1445814&view=auto
==============================================================================
--- hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/UnexpectedSnapshotException.java (added)
+++ hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/UnexpectedSnapshotException.java Wed Feb 13 18:37:32 2013
@@ -0,0 +1,42 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.snapshot;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+
+/**
+ * Catch-all exception for unexpected failures that occur while a snapshot is running.
+ */
+@SuppressWarnings("serial")
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class UnexpectedSnapshotException extends HBaseSnapshotException {
+  /**
+   * Wrap an unexpected failure of a snapshot attempt.
+   * @param msg reason why the snapshot couldn't be completed
+   * @param cause root cause of the failure
+   * @param snapshot description of the snapshot attempted
+   */
+  public UnexpectedSnapshotException(String msg, Exception cause, SnapshotDescription snapshot) {
+    super(msg, cause, snapshot);
+  }
+}

Added: hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/UnknownSnapshotException.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/UnknownSnapshotException.java?rev=1445814&view=auto
==============================================================================
--- hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/UnknownSnapshotException.java (added)
+++ hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/UnknownSnapshotException.java Wed Feb 13 18:37:32 2013
@@ -0,0 +1,42 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.snapshot;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Thrown when a request names a snapshot that the server does not recognize.
+ */
+@SuppressWarnings("serial")
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class UnknownSnapshotException extends HBaseSnapshotException {
+  /**
+   * @param msg full information about the failure
+   */
+  public UnknownSnapshotException(String msg) {
+    super(msg);
+  }
+
+  /**
+   * @param msg full information about the failure
+   * @param e root cause of the failure
+   */
+  public UnknownSnapshotException(String msg, Exception e) {
+    super(msg, e);
+  }
+}
\ No newline at end of file

Modified: hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java?rev=1445814&r1=1445813&r2=1445814&view=diff
==============================================================================
--- hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java (original)
+++ hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java Wed Feb 13 18:37:32 2013
@@ -40,8 +40,8 @@ import org.apache.hadoop.hbase.master.HM
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
-import org.apache.hadoop.hbase.snapshot.exception.SnapshotDoesNotExistException;
 import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
+import org.apache.hadoop.hbase.snapshot.SnapshotDoesNotExistException;
 import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;

Modified: hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java?rev=1445814&r1=1445813&r2=1445814&view=diff
==============================================================================
--- hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java (original)
+++ hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java Wed Feb 13 18:37:32 2013
@@ -34,8 +34,8 @@ import org.apache.hadoop.hbase.HConstant
 import org.apache.hadoop.hbase.LargeTests;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
 import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
-import org.apache.hadoop.hbase.snapshot.exception.SnapshotCreationException;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;

Modified: hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.java?rev=1445814&r1=1445813&r2=1445814&view=diff
==============================================================================
--- hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.java (original)
+++ hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.java Wed Feb 13 18:37:32 2013
@@ -49,7 +49,7 @@ import org.apache.hadoop.hbase.protobuf.
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
 import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
-import org.apache.hadoop.hbase.snapshot.exception.UnknownSnapshotException;
+import org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.HFileArchiveUtil;

Modified: hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotFileCache.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotFileCache.java?rev=1445814&r1=1445813&r2=1445814&view=diff
==============================================================================
--- hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotFileCache.java (original)
+++ hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotFileCache.java Wed Feb 13 18:37:32 2013
@@ -32,9 +32,9 @@ import org.apache.hadoop.hbase.HBaseTest
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.MediumTests;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
-import org.apache.hadoop.hbase.server.snapshot.TakeSnapshotUtils;
 import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
 import org.apache.hadoop.hbase.snapshot.SnapshotReferenceUtil;
+import org.apache.hadoop.hbase.snapshot.TakeSnapshotUtils;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.junit.After;
 import org.junit.AfterClass;



Mime
View raw message