hbase-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From jmhs...@apache.org
Subject svn commit: r1445782 [1/3] - in /hbase/branches/hbase-7290: hbase-common/src/main/java/org/apache/hadoop/hbase/ hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ hbase-protocol/src/main/protobuf/ hbase-server/src/main/java/org/ap...
Date Wed, 13 Feb 2013 18:05:55 GMT
Author: jmhsieh
Date: Wed Feb 13 18:05:53 2013
New Revision: 1445782

URL: http://svn.apache.org/r1445782
Log:
HBASE-6836 Offline snapshots (Jesse Yates) 


Added:
    hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotSentinel.java
    hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/
    hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/DisabledTableSnapshotHandler.java
    hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java
    hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/manage/
    hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/manage/SnapshotManager.java
    hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/server/snapshot/
    hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/server/snapshot/TakeSnapshotUtils.java
    hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/server/snapshot/error/
    hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/server/snapshot/error/SnapshotExceptionSnare.java
    hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/server/snapshot/error/SnapshotFailureListener.java
    hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/server/snapshot/task/
    hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/server/snapshot/task/CopyRecoveredEditsTask.java
    hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/server/snapshot/task/ReferenceRegionHFilesTask.java
    hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/server/snapshot/task/ReferenceServerWALsTask.java
    hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/server/snapshot/task/SnapshotTask.java
    hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/server/snapshot/task/TableInfoCopyTask.java
    hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/exception/
    hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/exception/CorruptedSnapshotException.java
    hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/exception/HBaseSnapshotException.java
    hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/exception/SnapshotCreationException.java
    hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/exception/SnapshotDoesNotExistException.java
    hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/exception/SnapshotExistsException.java
    hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/exception/TablePartiallyOpenException.java
    hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/exception/UnexpectedSnapshotException.java
    hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/exception/UnknownSnapshotException.java
    hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromAdmin.java
    hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java
    hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSnapshotFromMaster.java
    hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/
    hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/manage/
    hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/manage/TestSnapshotManager.java
    hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/server/snapshot/
    hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/server/snapshot/error/
    hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/server/snapshot/error/TestSnapshotExceptionSnare.java
    hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/server/snapshot/task/
    hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/server/snapshot/task/TestCopyRecoveredEditsTask.java
    hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/server/snapshot/task/TestReferenceRegionHFilesTask.java
    hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/server/snapshot/task/TestSnapshotTask.java
    hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/server/snapshot/task/TestWALReferenceTask.java
    hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/
    hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
    hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotDescriptionUtils.java
Removed:
    hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/HBaseSnapshotException.java
    hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotCreationException.java
    hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/UnknownSnapshotException.java
    hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotsFromAdmin.java
Modified:
    hbase/branches/hbase-7290/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
    hbase/branches/hbase-7290/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java
    hbase/branches/hbase-7290/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterAdminProtos.java
    hbase/branches/hbase-7290/hbase-protocol/src/main/protobuf/MasterAdmin.proto
    hbase/branches/hbase-7290/hbase-protocol/src/main/protobuf/hbase.proto
    hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/MasterAdminProtocol.java
    hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
    hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java
    hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
    hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
    hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/server/errorhandling/impl/ExceptionOrchestrator.java
    hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java
    hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java

Modified: hbase/branches/hbase-7290/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java?rev=1445782&r1=1445781&r2=1445782&view=diff
==============================================================================
--- hbase/branches/hbase-7290/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java (original)
+++ hbase/branches/hbase-7290/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java Wed Feb 13 18:05:53 2013
@@ -741,10 +741,17 @@ public final class HConstants {
   /** Directory under /hbase where archived hfiles are stored */
   public static final String HFILE_ARCHIVE_DIRECTORY = ".archive";
 
+  /**
+   * Name of the directory to store all snapshots. See SnapshotDescriptionUtils for
+   * remaining snapshot constants; this is here to keep HConstants dependencies at a minimum and
+   * uni-directional.
+   */
+  public static final String SNAPSHOT_DIR_NAME = ".snapshot";
+
   public static final List<String> HBASE_NON_USER_TABLE_DIRS = new ArrayList<String>(
       Arrays.asList(new String[] { HREGION_LOGDIR_NAME, HREGION_OLDLOGDIR_NAME, CORRUPT_DIR_NAME,
           toString(META_TABLE_NAME), toString(ROOT_TABLE_NAME), SPLIT_LOGDIR_NAME,
-          HBCK_SIDELINEDIR_NAME, HFILE_ARCHIVE_DIRECTORY }));
+          HBCK_SIDELINEDIR_NAME, HFILE_ARCHIVE_DIRECTORY, SNAPSHOT_DIR_NAME }));
   
   private HConstants() {
     // Can't be instantiated with this ctor.

Modified: hbase/branches/hbase-7290/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java?rev=1445782&r1=1445781&r2=1445782&view=diff
==============================================================================
--- hbase/branches/hbase-7290/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java (original)
+++ hbase/branches/hbase-7290/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java Wed Feb 13 18:05:53 2013
@@ -11321,6 +11321,10 @@ public final class HBaseProtos {
     // optional .SnapshotDescription.Type type = 4 [default = TIMESTAMP];
     boolean hasType();
     org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type getType();
+    
+    // optional int32 version = 5;
+    boolean hasVersion();
+    int getVersion();
   }
   public static final class SnapshotDescription extends
       com.google.protobuf.GeneratedMessage
@@ -11352,20 +11356,23 @@ public final class HBaseProtos {
     
     public enum Type
         implements com.google.protobuf.ProtocolMessageEnum {
-      TIMESTAMP(0, 0),
-      GLOBAL(1, 1),
+      DISABLED(0, 0),
+      TIMESTAMP(1, 1),
+      GLOBAL(2, 2),
       ;
       
-      public static final int TIMESTAMP_VALUE = 0;
-      public static final int GLOBAL_VALUE = 1;
+      public static final int DISABLED_VALUE = 0;
+      public static final int TIMESTAMP_VALUE = 1;
+      public static final int GLOBAL_VALUE = 2;
       
       
       public final int getNumber() { return value; }
       
       public static Type valueOf(int value) {
         switch (value) {
-          case 0: return TIMESTAMP;
-          case 1: return GLOBAL;
+          case 0: return DISABLED;
+          case 1: return TIMESTAMP;
+          case 2: return GLOBAL;
           default: return null;
         }
       }
@@ -11396,7 +11403,7 @@ public final class HBaseProtos {
       }
       
       private static final Type[] VALUES = {
-        TIMESTAMP, GLOBAL, 
+        DISABLED, TIMESTAMP, GLOBAL, 
       };
       
       public static Type valueOf(
@@ -11504,11 +11511,22 @@ public final class HBaseProtos {
       return type_;
     }
     
+    // optional int32 version = 5;
+    public static final int VERSION_FIELD_NUMBER = 5;
+    private int version_;
+    public boolean hasVersion() {
+      return ((bitField0_ & 0x00000010) == 0x00000010);
+    }
+    public int getVersion() {
+      return version_;
+    }
+    
     private void initFields() {
       name_ = "";
       table_ = "";
       creationTime_ = 0L;
       type_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type.TIMESTAMP;
+      version_ = 0;
     }
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
@@ -11538,6 +11556,9 @@ public final class HBaseProtos {
       if (((bitField0_ & 0x00000008) == 0x00000008)) {
         output.writeEnum(4, type_.getNumber());
       }
+      if (((bitField0_ & 0x00000010) == 0x00000010)) {
+        output.writeInt32(5, version_);
+      }
       getUnknownFields().writeTo(output);
     }
     
@@ -11563,6 +11584,10 @@ public final class HBaseProtos {
         size += com.google.protobuf.CodedOutputStream
           .computeEnumSize(4, type_.getNumber());
       }
+      if (((bitField0_ & 0x00000010) == 0x00000010)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeInt32Size(5, version_);
+      }
       size += getUnknownFields().getSerializedSize();
       memoizedSerializedSize = size;
       return size;
@@ -11606,6 +11631,11 @@ public final class HBaseProtos {
         result = result &&
             (getType() == other.getType());
       }
+      result = result && (hasVersion() == other.hasVersion());
+      if (hasVersion()) {
+        result = result && (getVersion()
+            == other.getVersion());
+      }
       result = result &&
           getUnknownFields().equals(other.getUnknownFields());
       return result;
@@ -11631,6 +11661,10 @@ public final class HBaseProtos {
         hash = (37 * hash) + TYPE_FIELD_NUMBER;
         hash = (53 * hash) + hashEnum(getType());
       }
+      if (hasVersion()) {
+        hash = (37 * hash) + VERSION_FIELD_NUMBER;
+        hash = (53 * hash) + getVersion();
+      }
       hash = (29 * hash) + getUnknownFields().hashCode();
       return hash;
     }
@@ -11755,6 +11789,8 @@ public final class HBaseProtos {
         bitField0_ = (bitField0_ & ~0x00000004);
         type_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type.TIMESTAMP;
         bitField0_ = (bitField0_ & ~0x00000008);
+        version_ = 0;
+        bitField0_ = (bitField0_ & ~0x00000010);
         return this;
       }
       
@@ -11809,6 +11845,10 @@ public final class HBaseProtos {
           to_bitField0_ |= 0x00000008;
         }
         result.type_ = type_;
+        if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
+          to_bitField0_ |= 0x00000010;
+        }
+        result.version_ = version_;
         result.bitField0_ = to_bitField0_;
         onBuilt();
         return result;
@@ -11837,6 +11877,9 @@ public final class HBaseProtos {
         if (other.hasType()) {
           setType(other.getType());
         }
+        if (other.hasVersion()) {
+          setVersion(other.getVersion());
+        }
         this.mergeUnknownFields(other.getUnknownFields());
         return this;
       }
@@ -11898,6 +11941,11 @@ public final class HBaseProtos {
               }
               break;
             }
+            case 40: {
+              bitField0_ |= 0x00000010;
+              version_ = input.readInt32();
+              break;
+            }
           }
         }
       }
@@ -12021,6 +12069,27 @@ public final class HBaseProtos {
         return this;
       }
       
+      // optional int32 version = 5;
+      private int version_ ;
+      public boolean hasVersion() {
+        return ((bitField0_ & 0x00000010) == 0x00000010);
+      }
+      public int getVersion() {
+        return version_;
+      }
+      public Builder setVersion(int value) {
+        bitField0_ |= 0x00000010;
+        version_ = value;
+        onChanged();
+        return this;
+      }
+      public Builder clearVersion() {
+        bitField0_ = (bitField0_ & ~0x00000010);
+        version_ = 0;
+        onChanged();
+        return this;
+      }
+      
       // @@protoc_insertion_point(builder_scope:SnapshotDescription)
     }
     
@@ -12174,18 +12243,19 @@ public final class HBaseProtos {
       "value\030\002 \002(\t\",\n\rNameBytesPair\022\014\n\004name\030\001 \002" +
       "(\t\022\r\n\005value\030\002 \001(\014\"/\n\016BytesBytesPair\022\r\n\005f" +
       "irst\030\001 \002(\014\022\016\n\006second\030\002 \002(\014\",\n\rNameInt64P" +
-      "air\022\014\n\004name\030\001 \001(\t\022\r\n\005value\030\002 \001(\003\"\242\001\n\023Sna" +
+      "air\022\014\n\004name\030\001 \001(\t\022\r\n\005value\030\002 \001(\003\"\301\001\n\023Sna" +
       "pshotDescription\022\014\n\004name\030\001 \002(\t\022\r\n\005table\030" +
       "\002 \001(\t\022\027\n\014creationTime\030\003 \001(\003:\0010\0222\n\004type\030\004" +
       " \001(\0162\031.SnapshotDescription.Type:\tTIMESTA" +
-      "MP\"!\n\004Type\022\r\n\tTIMESTAMP\020\000\022\n\n\006GLOBAL\020\001*r\n" +
-      "\013CompareType\022\010\n\004LESS\020\000\022\021\n\rLESS_OR_EQUAL\020" +
-      "\001\022\t\n\005EQUAL\020\002\022\r\n\tNOT_EQUAL\020\003\022\024\n\020GREATER_O",
-      "R_EQUAL\020\004\022\013\n\007GREATER\020\005\022\t\n\005NO_OP\020\006*_\n\007Key" +
-      "Type\022\013\n\007MINIMUM\020\000\022\007\n\003PUT\020\004\022\n\n\006DELETE\020\010\022\021" +
-      "\n\rDELETE_COLUMN\020\014\022\021\n\rDELETE_FAMILY\020\016\022\014\n\007" +
-      "MAXIMUM\020\377\001B>\n*org.apache.hadoop.hbase.pr" +
-      "otobuf.generatedB\013HBaseProtosH\001\240\001\001"
+      "MP\022\017\n\007version\030\005 \001(\005\"/\n\004Type\022\014\n\010DISABLED\020" +
+      "\000\022\r\n\tTIMESTAMP\020\001\022\n\n\006GLOBAL\020\002*r\n\013CompareT" +
+      "ype\022\010\n\004LESS\020\000\022\021\n\rLESS_OR_EQUAL\020\001\022\t\n\005EQUA",
+      "L\020\002\022\r\n\tNOT_EQUAL\020\003\022\024\n\020GREATER_OR_EQUAL\020\004" +
+      "\022\013\n\007GREATER\020\005\022\t\n\005NO_OP\020\006*_\n\007KeyType\022\013\n\007M" +
+      "INIMUM\020\000\022\007\n\003PUT\020\004\022\n\n\006DELETE\020\010\022\021\n\rDELETE_" +
+      "COLUMN\020\014\022\021\n\rDELETE_FAMILY\020\016\022\014\n\007MAXIMUM\020\377" +
+      "\001B>\n*org.apache.hadoop.hbase.protobuf.ge" +
+      "neratedB\013HBaseProtosH\001\240\001\001"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -12333,7 +12403,7 @@ public final class HBaseProtos {
           internal_static_SnapshotDescription_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_SnapshotDescription_descriptor,
-              new java.lang.String[] { "Name", "Table", "CreationTime", "Type", },
+              new java.lang.String[] { "Name", "Table", "CreationTime", "Type", "Version", },
               org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.class,
               org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Builder.class);
           return null;

Modified: hbase/branches/hbase-7290/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterAdminProtos.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterAdminProtos.java?rev=1445782&r1=1445781&r2=1445782&view=diff
==============================================================================
--- hbase/branches/hbase-7290/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterAdminProtos.java (original)
+++ hbase/branches/hbase-7290/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterAdminProtos.java Wed Feb 13 18:05:53 2013
@@ -14840,9 +14840,9 @@ public final class MasterAdminProtos {
   public interface TakeSnapshotResponseOrBuilder
       extends com.google.protobuf.MessageOrBuilder {
     
-    // required int64 expectedTime = 1;
-    boolean hasExpectedTime();
-    long getExpectedTime();
+    // required int64 expectedTimeout = 1;
+    boolean hasExpectedTimeout();
+    long getExpectedTimeout();
   }
   public static final class TakeSnapshotResponse extends
       com.google.protobuf.GeneratedMessage
@@ -14873,25 +14873,25 @@ public final class MasterAdminProtos {
     }
     
     private int bitField0_;
-    // required int64 expectedTime = 1;
-    public static final int EXPECTEDTIME_FIELD_NUMBER = 1;
-    private long expectedTime_;
-    public boolean hasExpectedTime() {
+    // required int64 expectedTimeout = 1;
+    public static final int EXPECTEDTIMEOUT_FIELD_NUMBER = 1;
+    private long expectedTimeout_;
+    public boolean hasExpectedTimeout() {
       return ((bitField0_ & 0x00000001) == 0x00000001);
     }
-    public long getExpectedTime() {
-      return expectedTime_;
+    public long getExpectedTimeout() {
+      return expectedTimeout_;
     }
     
     private void initFields() {
-      expectedTime_ = 0L;
+      expectedTimeout_ = 0L;
     }
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
       byte isInitialized = memoizedIsInitialized;
       if (isInitialized != -1) return isInitialized == 1;
       
-      if (!hasExpectedTime()) {
+      if (!hasExpectedTimeout()) {
         memoizedIsInitialized = 0;
         return false;
       }
@@ -14903,7 +14903,7 @@ public final class MasterAdminProtos {
                         throws java.io.IOException {
       getSerializedSize();
       if (((bitField0_ & 0x00000001) == 0x00000001)) {
-        output.writeInt64(1, expectedTime_);
+        output.writeInt64(1, expectedTimeout_);
       }
       getUnknownFields().writeTo(output);
     }
@@ -14916,7 +14916,7 @@ public final class MasterAdminProtos {
       size = 0;
       if (((bitField0_ & 0x00000001) == 0x00000001)) {
         size += com.google.protobuf.CodedOutputStream
-          .computeInt64Size(1, expectedTime_);
+          .computeInt64Size(1, expectedTimeout_);
       }
       size += getUnknownFields().getSerializedSize();
       memoizedSerializedSize = size;
@@ -14941,10 +14941,10 @@ public final class MasterAdminProtos {
       org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse) obj;
       
       boolean result = true;
-      result = result && (hasExpectedTime() == other.hasExpectedTime());
-      if (hasExpectedTime()) {
-        result = result && (getExpectedTime()
-            == other.getExpectedTime());
+      result = result && (hasExpectedTimeout() == other.hasExpectedTimeout());
+      if (hasExpectedTimeout()) {
+        result = result && (getExpectedTimeout()
+            == other.getExpectedTimeout());
       }
       result = result &&
           getUnknownFields().equals(other.getUnknownFields());
@@ -14955,9 +14955,9 @@ public final class MasterAdminProtos {
     public int hashCode() {
       int hash = 41;
       hash = (19 * hash) + getDescriptorForType().hashCode();
-      if (hasExpectedTime()) {
-        hash = (37 * hash) + EXPECTEDTIME_FIELD_NUMBER;
-        hash = (53 * hash) + hashLong(getExpectedTime());
+      if (hasExpectedTimeout()) {
+        hash = (37 * hash) + EXPECTEDTIMEOUT_FIELD_NUMBER;
+        hash = (53 * hash) + hashLong(getExpectedTimeout());
       }
       hash = (29 * hash) + getUnknownFields().hashCode();
       return hash;
@@ -15075,7 +15075,7 @@ public final class MasterAdminProtos {
       
       public Builder clear() {
         super.clear();
-        expectedTime_ = 0L;
+        expectedTimeout_ = 0L;
         bitField0_ = (bitField0_ & ~0x00000001);
         return this;
       }
@@ -15118,7 +15118,7 @@ public final class MasterAdminProtos {
         if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
           to_bitField0_ |= 0x00000001;
         }
-        result.expectedTime_ = expectedTime_;
+        result.expectedTimeout_ = expectedTimeout_;
         result.bitField0_ = to_bitField0_;
         onBuilt();
         return result;
@@ -15135,15 +15135,15 @@ public final class MasterAdminProtos {
       
       public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse other) {
         if (other == org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse.getDefaultInstance()) return this;
-        if (other.hasExpectedTime()) {
-          setExpectedTime(other.getExpectedTime());
+        if (other.hasExpectedTimeout()) {
+          setExpectedTimeout(other.getExpectedTimeout());
         }
         this.mergeUnknownFields(other.getUnknownFields());
         return this;
       }
       
       public final boolean isInitialized() {
-        if (!hasExpectedTime()) {
+        if (!hasExpectedTimeout()) {
           
           return false;
         }
@@ -15175,7 +15175,7 @@ public final class MasterAdminProtos {
             }
             case 8: {
               bitField0_ |= 0x00000001;
-              expectedTime_ = input.readInt64();
+              expectedTimeout_ = input.readInt64();
               break;
             }
           }
@@ -15184,23 +15184,23 @@ public final class MasterAdminProtos {
       
       private int bitField0_;
       
-      // required int64 expectedTime = 1;
-      private long expectedTime_ ;
-      public boolean hasExpectedTime() {
+      // required int64 expectedTimeout = 1;
+      private long expectedTimeout_ ;
+      public boolean hasExpectedTimeout() {
         return ((bitField0_ & 0x00000001) == 0x00000001);
       }
-      public long getExpectedTime() {
-        return expectedTime_;
+      public long getExpectedTimeout() {
+        return expectedTimeout_;
       }
-      public Builder setExpectedTime(long value) {
+      public Builder setExpectedTimeout(long value) {
         bitField0_ |= 0x00000001;
-        expectedTime_ = value;
+        expectedTimeout_ = value;
         onChanged();
         return this;
       }
-      public Builder clearExpectedTime() {
+      public Builder clearExpectedTimeout() {
         bitField0_ = (bitField0_ & ~0x00000001);
-        expectedTime_ = 0L;
+        expectedTimeout_ = 0L;
         onChanged();
         return this;
       }
@@ -19883,56 +19883,57 @@ public final class MasterAdminProtos {
       "anitorEnabledRequest\"0\n\037IsCatalogJanitor",
       "EnabledResponse\022\r\n\005value\030\001 \002(\010\"=\n\023TakeSn" +
       "apshotRequest\022&\n\010snapshot\030\001 \002(\0132\024.Snapsh" +
-      "otDescription\",\n\024TakeSnapshotResponse\022\024\n" +
-      "\014expectedTime\030\001 \002(\003\"\025\n\023ListSnapshotReque" +
-      "st\"?\n\024ListSnapshotResponse\022\'\n\tsnapshots\030" +
-      "\001 \003(\0132\024.SnapshotDescription\"?\n\025DeleteSna" +
-      "pshotRequest\022&\n\010snapshot\030\001 \002(\0132\024.Snapsho" +
-      "tDescription\"\030\n\026DeleteSnapshotResponse\"?" +
-      "\n\025IsSnapshotDoneRequest\022&\n\010snapshot\030\001 \001(" +
-      "\0132\024.SnapshotDescription\"U\n\026IsSnapshotDon",
-      "eResponse\022\023\n\004done\030\001 \001(\010:\005false\022&\n\010snapsh" +
-      "ot\030\002 \001(\0132\024.SnapshotDescription2\376\013\n\022Maste" +
-      "rAdminService\0222\n\taddColumn\022\021.AddColumnRe" +
-      "quest\032\022.AddColumnResponse\022;\n\014deleteColum" +
-      "n\022\024.DeleteColumnRequest\032\025.DeleteColumnRe" +
-      "sponse\022;\n\014modifyColumn\022\024.ModifyColumnReq" +
-      "uest\032\025.ModifyColumnResponse\0225\n\nmoveRegio" +
-      "n\022\022.MoveRegionRequest\032\023.MoveRegionRespon" +
-      "se\022;\n\014assignRegion\022\024.AssignRegionRequest" +
-      "\032\025.AssignRegionResponse\022A\n\016unassignRegio",
-      "n\022\026.UnassignRegionRequest\032\027.UnassignRegi" +
-      "onResponse\022>\n\rofflineRegion\022\025.OfflineReg" +
-      "ionRequest\032\026.OfflineRegionResponse\0228\n\013de" +
-      "leteTable\022\023.DeleteTableRequest\032\024.DeleteT" +
-      "ableResponse\0228\n\013enableTable\022\023.EnableTabl" +
-      "eRequest\032\024.EnableTableResponse\022;\n\014disabl" +
-      "eTable\022\024.DisableTableRequest\032\025.DisableTa" +
-      "bleResponse\0228\n\013modifyTable\022\023.ModifyTable" +
-      "Request\032\024.ModifyTableResponse\0228\n\013createT" +
-      "able\022\023.CreateTableRequest\032\024.CreateTableR",
-      "esponse\022/\n\010shutdown\022\020.ShutdownRequest\032\021." +
-      "ShutdownResponse\0225\n\nstopMaster\022\022.StopMas" +
-      "terRequest\032\023.StopMasterResponse\022,\n\007balan" +
-      "ce\022\017.BalanceRequest\032\020.BalanceResponse\022M\n" +
-      "\022setBalancerRunning\022\032.SetBalancerRunning" +
-      "Request\032\033.SetBalancerRunningResponse\022;\n\016" +
-      "runCatalogScan\022\023.CatalogScanRequest\032\024.Ca" +
-      "talogScanResponse\022S\n\024enableCatalogJanito" +
-      "r\022\034.EnableCatalogJanitorRequest\032\035.Enable" +
-      "CatalogJanitorResponse\022\\\n\027isCatalogJanit",
-      "orEnabled\022\037.IsCatalogJanitorEnabledReque" +
-      "st\032 .IsCatalogJanitorEnabledResponse\022L\n\021" +
-      "execMasterService\022\032.CoprocessorServiceRe" +
-      "quest\032\033.CoprocessorServiceResponse\0227\n\010sn" +
-      "apshot\022\024.TakeSnapshotRequest\032\025.TakeSnaps" +
-      "hotResponse\022<\n\rlistSnapshots\022\024.ListSnaps" +
-      "hotRequest\032\025.ListSnapshotResponse\022A\n\016del" +
-      "eteSnapshot\022\026.DeleteSnapshotRequest\032\027.De" +
-      "leteSnapshotResponse\022A\n\016isSnapshotDone\022\026" +
-      ".IsSnapshotDoneRequest\032\027.IsSnapshotDoneR",
-      "esponseBG\n*org.apache.hadoop.hbase.proto" +
-      "buf.generatedB\021MasterAdminProtosH\001\210\001\001\240\001\001"
+      "otDescription\"/\n\024TakeSnapshotResponse\022\027\n" +
+      "\017expectedTimeout\030\001 \002(\003\"\025\n\023ListSnapshotRe" +
+      "quest\"?\n\024ListSnapshotResponse\022\'\n\tsnapsho" +
+      "ts\030\001 \003(\0132\024.SnapshotDescription\"?\n\025Delete" +
+      "SnapshotRequest\022&\n\010snapshot\030\001 \002(\0132\024.Snap" +
+      "shotDescription\"\030\n\026DeleteSnapshotRespons" +
+      "e\"?\n\025IsSnapshotDoneRequest\022&\n\010snapshot\030\001" +
+      " \001(\0132\024.SnapshotDescription\"U\n\026IsSnapshot",
+      "DoneResponse\022\023\n\004done\030\001 \001(\010:\005false\022&\n\010sna" +
+      "pshot\030\002 \001(\0132\024.SnapshotDescription2\376\013\n\022Ma" +
+      "sterAdminService\0222\n\taddColumn\022\021.AddColum" +
+      "nRequest\032\022.AddColumnResponse\022;\n\014deleteCo" +
+      "lumn\022\024.DeleteColumnRequest\032\025.DeleteColum" +
+      "nResponse\022;\n\014modifyColumn\022\024.ModifyColumn" +
+      "Request\032\025.ModifyColumnResponse\0225\n\nmoveRe" +
+      "gion\022\022.MoveRegionRequest\032\023.MoveRegionRes" +
+      "ponse\022;\n\014assignRegion\022\024.AssignRegionRequ" +
+      "est\032\025.AssignRegionResponse\022A\n\016unassignRe",
+      "gion\022\026.UnassignRegionRequest\032\027.UnassignR" +
+      "egionResponse\022>\n\rofflineRegion\022\025.Offline" +
+      "RegionRequest\032\026.OfflineRegionResponse\0228\n" +
+      "\013deleteTable\022\023.DeleteTableRequest\032\024.Dele" +
+      "teTableResponse\0228\n\013enableTable\022\023.EnableT" +
+      "ableRequest\032\024.EnableTableResponse\022;\n\014dis" +
+      "ableTable\022\024.DisableTableRequest\032\025.Disabl" +
+      "eTableResponse\0228\n\013modifyTable\022\023.ModifyTa" +
+      "bleRequest\032\024.ModifyTableResponse\0228\n\013crea" +
+      "teTable\022\023.CreateTableRequest\032\024.CreateTab",
+      "leResponse\022/\n\010shutdown\022\020.ShutdownRequest" +
+      "\032\021.ShutdownResponse\0225\n\nstopMaster\022\022.Stop" +
+      "MasterRequest\032\023.StopMasterResponse\022,\n\007ba" +
+      "lance\022\017.BalanceRequest\032\020.BalanceResponse" +
+      "\022M\n\022setBalancerRunning\022\032.SetBalancerRunn" +
+      "ingRequest\032\033.SetBalancerRunningResponse\022" +
+      ";\n\016runCatalogScan\022\023.CatalogScanRequest\032\024" +
+      ".CatalogScanResponse\022S\n\024enableCatalogJan" +
+      "itor\022\034.EnableCatalogJanitorRequest\032\035.Ena" +
+      "bleCatalogJanitorResponse\022\\\n\027isCatalogJa",
+      "nitorEnabled\022\037.IsCatalogJanitorEnabledRe" +
+      "quest\032 .IsCatalogJanitorEnabledResponse\022" +
+      "L\n\021execMasterService\022\032.CoprocessorServic" +
+      "eRequest\032\033.CoprocessorServiceResponse\0227\n" +
+      "\010snapshot\022\024.TakeSnapshotRequest\032\025.TakeSn" +
+      "apshotResponse\022<\n\rlistSnapshots\022\024.ListSn" +
+      "apshotRequest\032\025.ListSnapshotResponse\022A\n\016" +
+      "deleteSnapshot\022\026.DeleteSnapshotRequest\032\027" +
+      ".DeleteSnapshotResponse\022A\n\016isSnapshotDon" +
+      "e\022\026.IsSnapshotDoneRequest\032\027.IsSnapshotDo",
+      "neResponseBG\n*org.apache.hadoop.hbase.pr" +
+      "otobuf.generatedB\021MasterAdminProtosH\001\210\001\001" +
+      "\240\001\001"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -20256,7 +20257,7 @@ public final class MasterAdminProtos {
           internal_static_TakeSnapshotResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_TakeSnapshotResponse_descriptor,
-              new java.lang.String[] { "ExpectedTime", },
+              new java.lang.String[] { "ExpectedTimeout", },
               org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse.class,
               org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse.Builder.class);
           internal_static_ListSnapshotRequest_descriptor =

Modified: hbase/branches/hbase-7290/hbase-protocol/src/main/protobuf/MasterAdmin.proto
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-protocol/src/main/protobuf/MasterAdmin.proto?rev=1445782&r1=1445781&r2=1445782&view=diff
==============================================================================
--- hbase/branches/hbase-7290/hbase-protocol/src/main/protobuf/MasterAdmin.proto (original)
+++ hbase/branches/hbase-7290/hbase-protocol/src/main/protobuf/MasterAdmin.proto Wed Feb 13 18:05:53 2013
@@ -182,7 +182,7 @@ message TakeSnapshotRequest{
 }
 
 message TakeSnapshotResponse{
-	required int64 expectedTime = 1;
+	required int64 expectedTimeout = 1;
 }
 
 message ListSnapshotRequest{

Modified: hbase/branches/hbase-7290/hbase-protocol/src/main/protobuf/hbase.proto
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-protocol/src/main/protobuf/hbase.proto?rev=1445782&r1=1445781&r2=1445782&view=diff
==============================================================================
--- hbase/branches/hbase-7290/hbase-protocol/src/main/protobuf/hbase.proto (original)
+++ hbase/branches/hbase-7290/hbase-protocol/src/main/protobuf/hbase.proto Wed Feb 13 18:05:53 2013
@@ -277,8 +277,10 @@ message SnapshotDescription {
 	optional string table = 2; // not needed for delete, but checked for in taking snapshot
 	optional int64 creationTime = 3 [default = 0];
 	enum Type {
-		TIMESTAMP = 0;
-		GLOBAL = 1;
+		DISABLED = 0;
+		TIMESTAMP = 1;
+		GLOBAL = 2;
 	}
 	optional Type type = 4 [default = TIMESTAMP];
+	optional int32 version = 5;
 }

Modified: hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/MasterAdminProtocol.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/MasterAdminProtocol.java?rev=1445782&r1=1445781&r2=1445782&view=diff
==============================================================================
--- hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/MasterAdminProtocol.java (original)
+++ hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/MasterAdminProtocol.java Wed Feb 13 18:05:53 2013
@@ -74,7 +74,8 @@ import org.apache.hadoop.hbase.protobuf.
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse;
 import org.apache.hadoop.hbase.security.KerberosInfo;
 import org.apache.hadoop.hbase.security.TokenInfo;
-import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
+import org.apache.hadoop.hbase.snapshot.exception.SnapshotCreationException;
+import org.apache.hadoop.hbase.snapshot.exception.UnknownSnapshotException;
 
 import com.google.protobuf.RpcController;
 import com.google.protobuf.ServiceException;

Modified: hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java?rev=1445782&r1=1445781&r2=1445782&view=diff
==============================================================================
--- hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java (original)
+++ hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java Wed Feb 13 18:05:53 2013
@@ -103,10 +103,10 @@ import org.apache.hadoop.hbase.protobuf.
 import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsResponse;
 import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
-import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
-import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
 import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
-import org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
+import org.apache.hadoop.hbase.snapshot.exception.HBaseSnapshotException;
+import org.apache.hadoop.hbase.snapshot.exception.SnapshotCreationException;
+import org.apache.hadoop.hbase.snapshot.exception.UnknownSnapshotException;
 import org.apache.hadoop.hbase.util.Addressing;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@@ -2217,14 +2217,15 @@ public class HBaseAdmin implements Abort
     TakeSnapshotResponse response = takeSnapshotAsync(snapshot);
     final IsSnapshotDoneRequest request = IsSnapshotDoneRequest.newBuilder().setSnapshot(snapshot)
         .build();
-    IsSnapshotDoneResponse done = IsSnapshotDoneResponse.newBuilder().buildPartial();
+    IsSnapshotDoneResponse done = null;
     long start = EnvironmentEdgeManager.currentTimeMillis();
-    long max = response.getExpectedTime();
+    long max = response.getExpectedTimeout();
     long maxPauseTime = max / this.numRetries;
     int tries = 0;
     LOG.debug("Waiting a max of " + max + " ms for snapshot to complete. (max " + maxPauseTime
         + " ms per retry)");
-    while ((EnvironmentEdgeManager.currentTimeMillis() - start) < max && !done.getDone()) {
+    while (tries == 0
+        || ((EnvironmentEdgeManager.currentTimeMillis() - start) < max && !done.getDone())) {
       try {
         // sleep a backoff <= pauseTime amount
         long sleep = getPauseTime(tries++);
@@ -2245,9 +2246,10 @@ public class HBaseAdmin implements Abort
         }
       });
     }
+    ;
     if (!done.getDone()) {
       throw new SnapshotCreationException("Snapshot '" + snapshot.getName()
-          + "' wasn't completed in expectedTime:" + max + " ms");
+          + "' wasn't completed in expectedTime:" + max + " ms", snapshot);
     }
   }
 

Modified: hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java?rev=1445782&r1=1445781&r2=1445782&view=diff
==============================================================================
--- hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java (original)
+++ hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java Wed Feb 13 18:05:53 2013
@@ -135,6 +135,7 @@ public abstract class EventHandler imple
     C_M_DELETE_FAMILY         (45, null), // Client asking Master to delete family of table
     C_M_MODIFY_FAMILY         (46, null), // Client asking Master to modify family of table
     C_M_CREATE_TABLE          (47, ExecutorType.MASTER_TABLE_OPERATIONS),   // Client asking Master to create a table
+    C_M_SNAPSHOT_TABLE        (48, ExecutorType.MASTER_TABLE_OPERATIONS),   // Client asking Master to snapshot an offline table
 
     // Updates from master to ZK. This is done by the master and there is
     // nothing to process by either Master or RS

Modified: hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java?rev=1445782&r1=1445781&r2=1445782&view=diff
==============================================================================
--- hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java (original)
+++ hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java Wed Feb 13 18:05:53 2013
@@ -18,6 +18,7 @@
  */
 package org.apache.hadoop.hbase.master;
 
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.lang.reflect.Constructor;
 import java.lang.reflect.InvocationTargetException;
@@ -49,6 +50,9 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.Chore;
@@ -103,6 +107,7 @@ import org.apache.hadoop.hbase.master.ha
 import org.apache.hadoop.hbase.master.handler.TableDeleteFamilyHandler;
 import org.apache.hadoop.hbase.master.handler.TableEventHandler;
 import org.apache.hadoop.hbase.master.handler.TableModifyFamilyHandler;
+import org.apache.hadoop.hbase.master.snapshot.manage.SnapshotManager;
 import org.apache.hadoop.hbase.monitoring.MemoryBoundedLogMessageBuffer;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.monitoring.TaskMonitor;
@@ -113,6 +118,7 @@ import org.apache.hadoop.hbase.protobuf.
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AddColumnRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AddColumnResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionRequest;
@@ -178,10 +184,18 @@ import org.apache.hadoop.hbase.protobuf.
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.replication.regionserver.Replication;
 import org.apache.hadoop.hbase.security.User;
+import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
+import org.apache.hadoop.hbase.snapshot.exception.HBaseSnapshotException;
+import org.apache.hadoop.hbase.snapshot.exception.SnapshotCreationException;
+import org.apache.hadoop.hbase.snapshot.exception.SnapshotDoesNotExistException;
+import org.apache.hadoop.hbase.snapshot.exception.SnapshotExistsException;
+import org.apache.hadoop.hbase.snapshot.exception.TablePartiallyOpenException;
+import org.apache.hadoop.hbase.snapshot.exception.UnknownSnapshotException;
 import org.apache.hadoop.hbase.trace.SpanReceiverHost;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CompressionTest;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
+import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.HFileArchiveUtil;
 import org.apache.hadoop.hbase.util.HasThread;
 import org.apache.hadoop.hbase.util.InfoServer;
@@ -201,8 +215,6 @@ import org.apache.hadoop.metrics.util.MB
 import org.apache.hadoop.net.DNS;
 import org.apache.zookeeper.KeeperException;
 import org.apache.zookeeper.Watcher;
-import org.apache.hadoop.hbase.trace.SpanReceiverHost;
-import org.apache.hadoop.hbase.util.FSUtils;
 
 import com.google.protobuf.RpcController;
 import com.google.protobuf.ServiceException;
@@ -327,8 +339,12 @@ Server {
 
   private SpanReceiverHost spanReceiverHost;
 
+
   private Map<String, Service> coprocessorServiceHandlers = Maps.newHashMap();
 
+  // monitor for snapshot of hbase tables
+  private SnapshotManager snapshotManager;
+
   /**
    * Initializes the HMaster. The steps are as follows:
    * <p>
@@ -503,6 +519,7 @@ Server {
       if (this.serverManager != null) this.serverManager.stop();
       if (this.assignmentManager != null) this.assignmentManager.stop();
       if (this.fileSystemManager != null) this.fileSystemManager.stop();
+      if (this.snapshotManager != null) this.snapshotManager.stop("server shutting down.");
       this.zooKeeper.close();
     }
     LOG.info("HMaster main thread exiting");
@@ -567,6 +584,10 @@ Server {
         ", sessionid=0x" +
         Long.toHexString(this.zooKeeper.getRecoverableZooKeeper().getSessionId()) +
         ", cluster-up flag was=" + wasUp);
+
+    // create the snapshot monitor
+    // TODO should this be config based?
+    this.snapshotManager = new SnapshotManager(this, zooKeeper, this.executorService);
   }
 
   /**
@@ -2409,31 +2430,226 @@ Server {
     return this.hfileCleaner;
   }
 
+  /**
+   * Exposed for TESTING!
+   * @return the underlying snapshot manager
+   */
+  SnapshotManager getSnapshotManagerForTesting() {
+    return this.snapshotManager;
+  }
+
   @Override
   public TakeSnapshotResponse snapshot(RpcController controller, TakeSnapshotRequest request)
       throws ServiceException {
-    throw new ServiceException(new UnsupportedOperationException(
-        "Snapshots are not implemented yet."));
+    LOG.debug("Starting snapshot for:" + request);
+    // get the snapshot information
+    SnapshotDescription snapshot = SnapshotDescriptionUtils.validate(request.getSnapshot(),
+      this.conf);
+
+    // check to see if we already completed the snapshot
+    if (isSnapshotCompleted(snapshot)) {
+      throw new ServiceException(new SnapshotExistsException("Snapshot '" + snapshot.getName()
+          + "' already stored on the filesystem.", snapshot));
+    }
+
+    LOG.debug("No existing snapshot, attempting snapshot...");
+
+    // check to see if the table exists
+    HTableDescriptor desc = null;
+    try {
+      desc = this.tableDescriptors.get(snapshot.getTable());
+    } catch (FileNotFoundException e) {
+      String msg = "Table:" + snapshot.getTable() + " info doesn't exist!";
+      LOG.error(msg);
+      throw new ServiceException(new SnapshotCreationException(msg, e, snapshot));
+    } catch (IOException e) {
+      throw new ServiceException(new SnapshotCreationException(
+          "Error while geting table description for table " + snapshot.getTable(), e, snapshot));
+    }
+    if (desc == null) {
+      throw new ServiceException(new SnapshotCreationException("Table '" + snapshot.getTable()
+          + "' doesn't exist, can't take snapshot.", snapshot));
+    }
+
+    // set the snapshot version, now that we are ready to take it
+    snapshot = snapshot.toBuilder().setVersion(SnapshotDescriptionUtils.SNAPSHOT_LAYOUT_VERSION)
+        .build();
+
+    // if the table is enabled, then have the RS actually run the snapshot work
+    if (this.assignmentManager.getZKTable().isEnabledTable(snapshot.getTable())) {
+      LOG.debug("Table enabled, starting distributed snapshot.");
+      throw new ServiceException(new UnsupportedOperationException(
+          "Enabled table snapshots are not yet supported"));
+    }
+    // For disabled table, snapshot is created by the master
+    else if (this.assignmentManager.getZKTable().isDisabledTable(snapshot.getTable())) {
+      LOG.debug("Table is disabled, running snapshot entirely on master.");
+      try {
+        snapshotManager.snapshotDisabledTable(snapshot);
+      } catch (HBaseSnapshotException e) {
+        throw new ServiceException(e);
+      }
+
+      LOG.debug("Started snapshot: " + snapshot);
+    } else {
+      LOG.error("Can't snapshot table '" + snapshot.getTable()
+          + "', isn't open or closed, we don't know what to do!");
+      throw new ServiceException(new SnapshotCreationException(
+          "Table is not entirely open or closed", new TablePartiallyOpenException(
+              snapshot.getTable() + " isn't fully open."), snapshot));
+    }
+    // send back the max amount of time the client should wait for the snapshot to complete
+    long waitTime = SnapshotDescriptionUtils.getMaxMasterTimeout(conf, snapshot.getType(),
+      SnapshotDescriptionUtils.DEFAULT_MAX_WAIT_TIME);
+    return TakeSnapshotResponse.newBuilder().setExpectedTimeout(waitTime).build();
   }
 
+  /**
+   * List the currently available/stored snapshots. Any in-progress snapshots are ignored
+   */
   @Override
   public ListSnapshotResponse listSnapshots(RpcController controller, ListSnapshotRequest request)
       throws ServiceException {
-    throw new ServiceException(new UnsupportedOperationException(
-        "Snapshots are not implemented yet."));
+    try {
+      ListSnapshotResponse.Builder builder = ListSnapshotResponse.newBuilder();
+
+      // first create the snapshot description and check to see if it exists
+      Path snapshotDir = SnapshotDescriptionUtils.getSnapshotsDir(this.getMasterFileSystem()
+          .getRootDir());
+
+      // if there are no snapshots, return an empty list
+      if (!this.getMasterFileSystem().getFileSystem().exists(snapshotDir)) {
+        return builder.build();
+      }
+
+      FileSystem fs = this.getMasterFileSystem().getFileSystem();
+
+      // ignore all the snapshots in progress
+      FileStatus[] snapshots = fs.listStatus(snapshotDir,
+        new SnapshotDescriptionUtils.CompletedSnaphotDirectoriesFilter(fs));
+      // look through all the completed snapshots
+      for (FileStatus snapshot : snapshots) {
+        Path info = new Path(snapshot.getPath(), SnapshotDescriptionUtils.SNAPSHOTINFO_FILE);
+        // if the snapshot info file is missing, the snapshot is corrupted - skip it
+        if (!fs.exists(info)) {
+          LOG.error("Snapshot information for " + snapshot.getPath() + " doesn't exist");
+          continue;
+        }
+        FSDataInputStream in = null;
+        try {
+          in = fs.open(info);
+          SnapshotDescription desc = SnapshotDescription.parseFrom(in);
+          builder.addSnapshots(desc);
+        } catch (IOException e) {
+          LOG.warn("Found a corrupted snapshot " + snapshot.getPath(), e);
+        } finally {
+          if (in != null) {
+            in.close();
+          }
+        }
+      }
+      return builder.build();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
   }
 
   @Override
   public DeleteSnapshotResponse deleteSnapshot(RpcController controller,
       DeleteSnapshotRequest request) throws ServiceException {
-    throw new ServiceException(new UnsupportedOperationException(
-        "Snapshots are not implemented yet."));
+    try {
+      // check to see if it is completed
+      if (!isSnapshotCompleted(request.getSnapshot())) {
+        throw new SnapshotDoesNotExistException(request.getSnapshot());
+      }
+
+      String snapshotName = request.getSnapshot().getName();
+      LOG.debug("Deleting snapshot: " + snapshotName);
+      // first create the snapshot description and check to see if it exists
+      Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, this
+          .getMasterFileSystem().getRootDir());
+
+      // delete the existing snapshot
+      if (!this.getMasterFileSystem().getFileSystem().delete(snapshotDir, true)) {
+        throw new ServiceException("Failed to delete snapshot directory: " + snapshotDir);
+      }
+      return DeleteSnapshotResponse.newBuilder().build();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
   }
 
   @Override
   public IsSnapshotDoneResponse isSnapshotDone(RpcController controller,
       IsSnapshotDoneRequest request) throws ServiceException {
-    throw new ServiceException(new UnsupportedOperationException(
-        "Snapshots are not implemented yet."));
+    LOG.debug("Checking to see if snapshot from request:" + request + " is done");
+    try {
+      // check the request to make sure it has a snapshot
+      if (!request.hasSnapshot()) {
+        throw new UnknownSnapshotException(
+            "No snapshot name passed in request, can't figure out which snapshot you want to check.");
+      }
+
+      SnapshotDescription expected = request.getSnapshot();
+      IsSnapshotDoneResponse.Builder builder = IsSnapshotDoneResponse.newBuilder();
+
+      // check to see if the sentinel exists
+      SnapshotSentinel sentinel = this.snapshotManager.getCurrentSnapshotSentinel();
+      if (sentinel != null) {
+
+        // pass on any failure we find in the sentinel
+        HBaseSnapshotException e = sentinel.getExceptionIfFailed();
+        if (e != null) throw e;
+
+        // get the current snapshot and compare it against the requested
+        SnapshotDescription snapshot = sentinel.getSnapshot();
+        LOG.debug("Have a snapshot to compare:" + snapshot);
+        if (expected.getName().equals(snapshot.getName())) {
+          LOG.trace("Running snapshot (" + snapshot.getName() + ") does match request:"
+              + expected.getName());
+
+          // check to see if we are done
+          if (sentinel.isFinished()) {
+            builder.setDone(true);
+            LOG.debug("Snapshot " + snapshot + " has completed, notifying client.");
+          } else if (LOG.isDebugEnabled()) {
+            LOG.debug("Sentinel isn't finished with snapshot!");
+          }
+          return builder.build();
+        }
+
+      }
+
+      // check to see if the snapshot is already on the fs
+      if (!isSnapshotCompleted(expected)) {
+        throw new UnknownSnapshotException("Snapshot:" + expected.getName()
+            + " is not currently running or one of the known completed snapshots.");
+      }
+
+      builder.setDone(true);
+      return builder.build();
+    } catch (HBaseSnapshotException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  /**
+   * Check to see if the snapshot is one of the currently completed snapshots
+   * @param snapshot the snapshot to check for on the filesystem
+   * @return <tt>true</tt> if the snapshot is stored on the {@link FileSystem}, <tt>false</tt> if it
+   *         is not stored
+   * @throws ServiceException if the filesystem throws an unexpected exception
+   */
+  private boolean isSnapshotCompleted(SnapshotDescription snapshot) throws ServiceException {
+    final Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, this
+        .getMasterFileSystem().getRootDir());
+    FileSystem fs = this.getMasterFileSystem().getFileSystem();
+
+    // check to see if the snapshot already exists
+    try {
+      return fs.exists(snapshotDir);
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
   }
 }

Added: hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotSentinel.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotSentinel.java?rev=1445782&view=auto
==============================================================================
--- hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotSentinel.java (added)
+++ hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotSentinel.java Wed Feb 13 18:05:53 2013
@@ -0,0 +1,52 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hbase.Stoppable;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.snapshot.exception.HBaseSnapshotException;
+
+/**
+ * Watches the currently running snapshot
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public interface SnapshotSentinel extends Stoppable {
+
+  /**
+   * Check to see if the snapshot is finished, where finished may be success or failure.
+   * @return <tt>false</tt> if the snapshot is still in progress, <tt>true</tt> if the snapshot has
+   *         finished
+   */
+  public boolean isFinished();
+
+  /**
+   * @return the description of the snapshot being run
+   */
+  public SnapshotDescription getSnapshot();
+
+  /**
+   * Get the exception that caused the snapshot to fail, if the snapshot has failed.
+   * @return <tt>null</tt> if the snapshot succeeded, or the {@link HBaseSnapshotException} that
+   *         caused the snapshot to fail.
+   */
+  public HBaseSnapshotException getExceptionIfFailed();
+
+}

Added: hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/DisabledTableSnapshotHandler.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/DisabledTableSnapshotHandler.java?rev=1445782&view=auto
==============================================================================
--- hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/DisabledTableSnapshotHandler.java (added)
+++ hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/DisabledTableSnapshotHandler.java Wed Feb 13 18:05:53 2013
@@ -0,0 +1,236 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.snapshot;
+
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.executor.EventHandler;
+import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.master.SnapshotSentinel;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.server.errorhandling.OperationAttemptTimer;
+import org.apache.hadoop.hbase.server.snapshot.TakeSnapshotUtils;
+import org.apache.hadoop.hbase.server.snapshot.error.SnapshotExceptionSnare;
+import org.apache.hadoop.hbase.server.snapshot.task.CopyRecoveredEditsTask;
+import org.apache.hadoop.hbase.server.snapshot.task.ReferenceRegionHFilesTask;
+import org.apache.hadoop.hbase.server.snapshot.task.TableInfoCopyTask;
+import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
+import org.apache.hadoop.hbase.snapshot.exception.HBaseSnapshotException;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.Pair;
+
+/**
+ * Take a snapshot of a disabled table.
+ * <p>
+ * Table must exist when taking the snapshot, or results are undefined.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class DisabledTableSnapshotHandler extends EventHandler implements SnapshotSentinel {
+  private static final Log LOG = LogFactory.getLog(DisabledTableSnapshotHandler.class);
+
+  private volatile boolean stopped = false;
+
+  protected final Configuration conf;
+  protected final FileSystem fs;
+  protected final Path rootDir;
+
+  private final MasterServices masterServices;
+
+  private final SnapshotDescription snapshot;
+
+  private final Path workingDir;
+
+  private final String tableName;
+
+  private final OperationAttemptTimer timer;
+  private final SnapshotExceptionSnare monitor;
+
+  private final MasterSnapshotVerifier verify;
+
+  /**
+   * @param snapshot descriptor of the snapshot to take
+   * @param server parent server
+   * @param masterServices master services provider
+   * @throws IOException on unexpected error
+   */
+  public DisabledTableSnapshotHandler(SnapshotDescription snapshot, Server server,
+      final MasterServices masterServices)
+      throws IOException {
+    super(server, EventType.C_M_SNAPSHOT_TABLE);
+    this.masterServices = masterServices;
+    this.tableName = snapshot.getTable();
+
+    this.snapshot = snapshot;
+    this.monitor = new SnapshotExceptionSnare(snapshot);
+
+    this.conf = this.masterServices.getConfiguration();
+    this.fs = this.masterServices.getMasterFileSystem().getFileSystem();
+
+    this.rootDir = FSUtils.getRootDir(this.conf);
+    this.workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir);
+
+    // prepare the verifier that will check the snapshot once it completes
+    this.verify = new MasterSnapshotVerifier(masterServices, snapshot, rootDir);
+
+    // setup the timer
+    timer = TakeSnapshotUtils.getMasterTimerAndBindToMonitor(snapshot, conf, monitor);
+  }
+
+  // TODO consider parallelizing these operations since they are independent. Right now it's just
+  // easier to keep them serial though
+  @Override
+  public void process() {
+    LOG.info("Running table snapshot operation " + eventType + " on table " + tableName);
+    try {
+      timer.start();
+      // write down the snapshot info in the working directory
+      SnapshotDescriptionUtils.writeSnapshotInfo(snapshot, workingDir, this.fs);
+
+      // 1. get all the regions hosting this table.
+      List<Pair<HRegionInfo, ServerName>> regionsAndLocations = null;
+      while (regionsAndLocations == null) {
+        try {
+          regionsAndLocations = MetaReader.getTableRegionsAndLocations(
+            this.server.getCatalogTracker(), Bytes.toBytes(tableName), true);
+        } catch (InterruptedException e) {
+          // check to see if we failed, in which case return
+          if (this.monitor.checkForError()) return;
+          // otherwise, just reset the interrupt and keep on going
+          Thread.currentThread().interrupt();
+        }
+      }
+
+      // extract each pair to separate lists
+      Set<String> serverNames = new HashSet<String>();
+      Set<HRegionInfo> regions = new HashSet<HRegionInfo>();
+      for (Pair<HRegionInfo, ServerName> p : regionsAndLocations) {
+        regions.add(p.getFirst());
+        serverNames.add(p.getSecond().toString());
+      }
+
+      // 2. for each region, write all the info to disk
+      LOG.info("Starting to write region info and WALs for regions for offline snapshot:"
+          + snapshot);
+      for (HRegionInfo regionInfo : regions) {
+        // 2.1 copy the regionInfo files to the snapshot
+        Path snapshotRegionDir = TakeSnapshotUtils.getRegionSnapshotDirectory(snapshot, rootDir,
+          regionInfo.getEncodedName());
+        HRegion.writeRegioninfoOnFilesystem(regionInfo, snapshotRegionDir, fs, conf);
+        // check for error for each region
+        monitor.failOnError();
+
+        // 2.2 for each region, copy over its recovered.edits directory
+        Path regionDir = HRegion.getRegionDir(rootDir, regionInfo);
+        new CopyRecoveredEditsTask(snapshot, monitor, fs, regionDir, snapshotRegionDir).run();
+        monitor.failOnError();
+
+        // 2.3 reference all the files in the region
+        new ReferenceRegionHFilesTask(snapshot, monitor, regionDir, fs, snapshotRegionDir).run();
+        monitor.failOnError();
+      }
+
+      // 3. write the table info to disk
+      LOG.info("Starting to copy tableinfo for offline snapshot:\n" + snapshot);
+      TableInfoCopyTask tableInfo = new TableInfoCopyTask(this.monitor, snapshot, fs,
+          FSUtils.getRootDir(conf));
+      tableInfo.run();
+      monitor.failOnError();
+
+      // 4. verify the snapshot is valid
+      verify.verifySnapshot(this.workingDir, serverNames);
+
+      // 5. complete the snapshot
+      SnapshotDescriptionUtils.completeSnapshot(this.snapshot, this.rootDir, this.workingDir,
+        this.fs);
+
+    } catch (Exception e) {
+      // make sure we capture the exception to propagate back to the client later
+      monitor.snapshotFailure("Failed due to exception:" + e.getMessage(), snapshot, e);
+    } finally {
+      LOG.debug("Marking snapshot" + this.snapshot + " as finished.");
+      this.stopped = true;
+
+      // 6. mark the timer as finished - even if we got an exception, we don't need to time the
+      // operation any further
+      timer.complete();
+
+      LOG.debug("Launching cleanup of working dir:" + workingDir);
+      try {
+        // don't mark the snapshot as a failure if we can't cleanup - the snapshot worked.
+        if (!this.fs.delete(this.workingDir, true)) {
+          LOG.error("Couldn't delete snapshot working directory:" + workingDir);
+        }
+      } catch (IOException e) {
+        LOG.error("Couldn't delete snapshot working directory:" + workingDir);
+      }
+    }
+  }
+
+  /** @return whether this handler has finished running (successfully or not). */
+  @Override
+  public boolean isFinished() {
+    return stopped;
+  }
+
+  /** @return the description of the snapshot this handler is taking. */
+  @Override
+  public SnapshotDescription getSnapshot() {
+    return this.snapshot;
+  }
+
+  @Override
+  public void stop(String why) {
+    if (this.stopped) return;
+    this.stopped = true;
+    LOG.info("Stopping disabled snapshot because: " + why);
+    // pass along the stop as a failure. This keeps all the 'should I stop running?' logic in a
+    // single place, though it is technically a little bit of an overload of how the error handler
+    // should be used.
+    this.monitor.snapshotFailure("Failing snapshot because server is stopping.", snapshot);
+  }
+
+  /** @return true once {@link #stop(String)} was called or processing has finished. */
+  @Override
+  public boolean isStopped() {
+    return stopped;
+  }
+
+  /**
+   * Poll the monitor for a recorded failure.
+   * @return the recorded snapshot exception, or <tt>null</tt> if none has occurred
+   */
+  @Override
+  public HBaseSnapshotException getExceptionIfFailed() {
+    // failOnError() throws exactly when a failure has been recorded on the monitor
+    try {
+      monitor.failOnError();
+    } catch (HBaseSnapshotException cause) {
+      return cause;
+    }
+    return null;
+  }
+}
\ No newline at end of file

Added: hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java?rev=1445782&view=auto
==============================================================================
--- hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java (added)
+++ hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java Wed Feb 13 18:05:53 2013
@@ -0,0 +1,252 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.snapshot;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.StoreFile;
+import org.apache.hadoop.hbase.server.snapshot.TakeSnapshotUtils;
+import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
+import org.apache.hadoop.hbase.snapshot.exception.CorruptedSnapshotException;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSTableDescriptors;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.HFileArchiveUtil;
+
+/**
+ * General snapshot verification on the master.
+ * <p>
+ * This is a light-weight verification mechanism for all the files in a snapshot. It doesn't attempt
+ * to verify that the files are exact copies (that would be tantamount to taking the snapshot
+ * again!), but instead just attempts to ensure that the files match the expected files and are the
+ * same length.
+ * <p>
+ * Current snapshot files checked:
+ * <ol>
+ * <li>SnapshotDescription is readable</li>
+ * <li>Table info is readable</li>
+ * <li>Regions</li>
+ * <ul>
+ * <li>Matching regions in the snapshot as currently in the table</li>
+ * <li>{@link HRegionInfo} matches the current and stored regions</li>
+ * <li>All referenced hfiles have valid names</li>
+ * <li>All the hfiles are present (either in .archive directory in the region)</li>
+ * <li>All recovered.edits files are present (by name) and have the correct file size</li>
+ * </ul>
+ * <li>HLogs for each server running the snapshot have been referenced
+ * <ul>
+ * <li>Only checked for {@link Type#GLOBAL} snapshots</li>
+ * </ul>
+ * </li>
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public final class MasterSnapshotVerifier {
+
+  /** description of the snapshot being verified */
+  private SnapshotDescription snapshot;
+  /** filesystem the snapshot files live on */
+  private FileSystem fs;
+  /** root directory of the hbase installation */
+  private Path rootDir;
+  /** name of the snapshotted table */
+  private String tableName;
+  private MasterServices services;
+
+  /**
+   * Build a util for the given snapshot
+   * @param services services for the master
+   * @param snapshot snapshot to check
+   * @param rootDir root directory of the hbase installation.
+   */
+  public MasterSnapshotVerifier(MasterServices services, SnapshotDescription snapshot, Path rootDir) {
+    this.fs = services.getMasterFileSystem().getFileSystem();
+    this.services = services;
+    this.snapshot = snapshot;
+    this.rootDir = rootDir;
+    this.tableName = snapshot.getTable();
+  }
+
+  /**
+   * Verify that the snapshot in the directory is a valid snapshot
+   * @param snapshotDir snapshot directory to check
+   * @param snapshotServers {@link ServerName} of the servers that are involved in the snapshot
+   * @throws CorruptedSnapshotException if the snapshot is invalid
+   * @throws IOException if there is an unexpected connection issue to the filesystem
+   */
+  public void verifySnapshot(Path snapshotDir, Set<String> snapshotServers)
+      throws CorruptedSnapshotException, IOException {
+    // verify snapshot info matches
+    verifySnapshotDescription(snapshotDir);
+
+    // check that tableinfo is a valid table description
+    verifyTableInfo(snapshotDir);
+
+    // check that each region is valid
+    verifyRegions(snapshotDir);
+
+    // check that the hlogs, if they exist, are valid
+    if (shouldCheckLogs(snapshot.getType())) {
+      verifyLogs(snapshotDir, snapshotServers);
+    }
+  }
+
+  /**
+   * Check to see if the snapshot should verify the logs directory based on the type of the logs.
+   * @param type type of snapshot being taken
+   * @return <tt>true</tt> if the logs directory should be verified, <tt>false</tt> otherwise
+   */
+  private boolean shouldCheckLogs(Type type) {
+    // This is better handled in the Type enum via type, but since its PB based, this is the
+    // simplest way to handle it
+    return type.equals(Type.GLOBAL);
+  }
+
+  /**
+   * Check that the snapshot description written in the filesystem matches the current snapshot
+   * @param snapshotDir snapshot directory to check
+   */
+  private void verifySnapshotDescription(Path snapshotDir) throws CorruptedSnapshotException {
+    SnapshotDescription found = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
+    if (!this.snapshot.equals(found)) {
+      throw new CorruptedSnapshotException("Snapshot read (" + found
+          + ") doesn't equal snapshot we ran (" + snapshot + ").", snapshot);
+    }
+  }
+
+  /**
+   * Check that the table descriptor for the snapshot is a valid table descriptor
+   * @param snapshotDir snapshot directory to check
+   */
+  private void verifyTableInfo(Path snapshotDir) throws IOException {
+    FSTableDescriptors.getTableDescriptor(fs, snapshotDir);
+  }
+
+  /**
+   * Check that all the regions in the the snapshot are valid
+   * @param snapshotDir snapshot directory to check
+   * @throws IOException if we can't reach .META. or read the files from the FS
+   */
+  private void verifyRegions(Path snapshotDir) throws IOException {
+    List<HRegionInfo> regions = MetaReader.getTableRegions(this.services.getCatalogTracker(),
+      Bytes.toBytes(tableName));
+    for (HRegionInfo region : regions) {
+      verifyRegion(fs, snapshotDir, region);
+    }
+  }
+
+  /**
+   * Verify that the region (regioninfo, hfiles) are valid
+   * @param fs filesystem holding the snapshot files
+   * @param snapshotDir snapshot directory to check
+   * @param region the region to check
+   */
+  private void verifyRegion(FileSystem fs, Path snapshotDir, HRegionInfo region) throws IOException {
+    // make sure we have region in the snapshot
+    Path regionDir = new Path(snapshotDir, region.getEncodedName());
+    if (!fs.exists(regionDir)) {
+      throw new CorruptedSnapshotException("No region directory found for region:" + region,
+          snapshot);
+    }
+    // make sure we have the region info in the snapshot
+    Path regionInfo = new Path(regionDir, HRegion.REGIONINFO_FILE);
+    // make sure the file exists
+    if (!fs.exists(regionInfo)) {
+      throw new CorruptedSnapshotException("No region info found for region:" + region, snapshot);
+    }
+    FSDataInputStream in = fs.open(regionInfo);
+    HRegionInfo found;
+    try {
+      found = HRegionInfo.parseFrom(in);
+    } finally {
+      // close the stream even if parsing fails - don't leak the file handle
+      in.close();
+    }
+    if (!region.equals(found)) {
+      throw new CorruptedSnapshotException("Found region info (" + found
+          + ") doesn't match expected region:" + region, snapshot);
+    }
+
+    // make sure we have the expected recovered edits files
+    TakeSnapshotUtils.verifyRecoveredEdits(fs, snapshotDir, found, snapshot);
+
+    // check for the existence of each hfile
+    PathFilter familiesDirs = new FSUtils.FamilyDirFilter(fs);
+    FileStatus[] columnFamilies = FSUtils.listStatus(fs, regionDir, familiesDirs);
+    // should we do some checking here to make sure the cfs are correct?
+    if (columnFamilies == null) return;
+
+    // setup the suffixes for the snapshot directories
+    Path tableNameSuffix = new Path(tableName);
+    Path regionNameSuffix = new Path(tableNameSuffix, region.getEncodedName());
+
+    // get the potential real paths
+    Path archivedRegion = new Path(HFileArchiveUtil.getArchivePath(services.getConfiguration()),
+        regionNameSuffix);
+    Path realRegion = new Path(rootDir, regionNameSuffix);
+
+    // loop through each cf and check we can find each of the hfiles
+    for (FileStatus cf : columnFamilies) {
+      FileStatus[] hfiles = FSUtils.listStatus(fs, cf.getPath(), null);
+      // should we check if there should be hfiles?
+      if (hfiles == null || hfiles.length == 0) continue;
+
+      Path realCfDir = new Path(realRegion, cf.getPath().getName());
+      Path archivedCfDir = new Path(archivedRegion, cf.getPath().getName());
+      for (FileStatus hfile : hfiles) {
+        // make sure the name is correct
+        if (!StoreFile.validateStoreFileName(hfile.getPath().getName())) {
+          throw new CorruptedSnapshotException("HFile: " + hfile.getPath()
+              + " is not a valid hfile name.", snapshot);
+        }
+
+        // check to see if hfile is present in the real table or in the archive
+        String fileName = hfile.getPath().getName();
+        Path file = new Path(realCfDir, fileName);
+        Path archived = new Path(archivedCfDir, fileName);
+        // BUGFIX: this previously read '!fs.equals(archived)', comparing the FileSystem object
+        // to a Path - never equal - so the archive location was effectively ignored and hfiles
+        // that had been archived triggered false corruption errors.
+        if (!fs.exists(file) && !fs.exists(archived)) {
+          throw new CorruptedSnapshotException("Can't find hfile: " + hfile.getPath()
+              + " in the real (" + realCfDir + ") or archive (" + archivedCfDir
+              + ") directory for the primary table.", snapshot);
+        }
+      }
+    }
+  }
+
+  /**
+   * Check that the logs stored in the log directory for the snapshot are valid - it contains all
+   * the expected logs for all servers involved in the snapshot.
+   * @param snapshotDir snapshot directory to check
+   * @param snapshotServers list of the names of servers involved in the snapshot.
+   * @throws CorruptedSnapshotException if the hlogs in the snapshot are not correct
+   * @throws IOException if we can't reach the filesystem
+   */
+  private void verifyLogs(Path snapshotDir, Set<String> snapshotServers)
+      throws CorruptedSnapshotException, IOException {
+    Path snapshotLogDir = new Path(snapshotDir, HConstants.HREGION_LOGDIR_NAME);
+    Path logsDir = new Path(rootDir, HConstants.HREGION_LOGDIR_NAME);
+    TakeSnapshotUtils.verifyAllLogsGotReferenced(fs, logsDir, snapshotServers, snapshot,
+      snapshotLogDir);
+  }
+}
\ No newline at end of file

Added: hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/manage/SnapshotManager.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/manage/SnapshotManager.java?rev=1445782&view=auto
==============================================================================
--- hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/manage/SnapshotManager.java (added)
+++ hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/manage/SnapshotManager.java Wed Feb 13 18:05:53 2013
@@ -0,0 +1,182 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.snapshot.manage;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.Stoppable;
+import org.apache.hadoop.hbase.executor.ExecutorService;
+import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.master.SnapshotSentinel;
+import org.apache.hadoop.hbase.master.snapshot.DisabledTableSnapshotHandler;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type;
+import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
+import org.apache.hadoop.hbase.snapshot.exception.HBaseSnapshotException;
+import org.apache.hadoop.hbase.snapshot.exception.SnapshotCreationException;
+import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.apache.zookeeper.KeeperException;
+
+/**
+ * This class monitors the whole process of snapshots via ZooKeeper. There is only one
+ * SnapshotMonitor for the master.
+ * <p>
+ * Start monitoring a snapshot by calling method monitor() before the snapshot is started across the
+ * cluster via ZooKeeper. SnapshotMonitor would stop monitoring this snapshot only if it is finished
+ * or aborted.
+ * <p>
+ * Note: There could be only one snapshot being processed and monitored at a time over the cluster.
+ * Start monitoring a snapshot only when the previous one reaches an end status.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class SnapshotManager implements Stoppable {
+  private static final Log LOG = LogFactory.getLog(SnapshotManager.class);
+
+  // TODO - enable having multiple snapshots with multiple monitors
+
+  private final MasterServices master;
+  private SnapshotSentinel handler;
+  private ExecutorService pool;
+  private final Path rootDir;
+
+  private boolean stopped;
+
+  public SnapshotManager(final MasterServices master, final ZooKeeperWatcher watcher,
+      final ExecutorService executorService) throws KeeperException {
+    this.master = master;
+    this.pool = executorService;
+    this.rootDir = master.getMasterFileSystem().getRootDir();
+  }
+
+  /**
+   * @return <tt>true</tt> if there is a snapshot currently being taken, <tt>false</tt> otherwise
+   */
+  public boolean isTakingSnapshot() {
+    return handler != null && !handler.isFinished();
+  }
+
+  /**
+   * Check to make sure that we are OK to run the passed snapshot. Checks to make sure that we
+   * aren't already running a snapshot.
+   * @param snapshot description of the snapshot we want to start
+   * @throws HBaseSnapshotException if the filesystem could not be prepared to start the snapshot
+   */
+  private synchronized void prepareToTakeSnapshot(SnapshotDescription snapshot)
+      throws HBaseSnapshotException {
+    FileSystem fs = master.getMasterFileSystem().getFileSystem();
+    Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir);
+
+    // make sure we aren't already running a snapshot
+    if (isTakingSnapshot()) {
+      throw new SnapshotCreationException("Already running another snapshot:"
+          + this.handler.getSnapshot(), snapshot);
+    }
+
+    try {
+      // delete the working directory, since we aren't running the snapshot
+      fs.delete(workingDir, true);
+
+      // recreate the working directory for the snapshot
+      if (!fs.mkdirs(workingDir)) {
+        throw new SnapshotCreationException("Couldn't create working directory (" + workingDir
+            + ") for snapshot.", snapshot);
+      }
+    } catch (HBaseSnapshotException e) {
+      throw e;
+    } catch (IOException e) {
+      throw new SnapshotCreationException(
+          "Exception while checking to see if snapshot could be started.", e, snapshot);
+    }
+  }
+
+  /**
+   * Take a snapshot of a disabled table.
+   * <p>
+   * Ensures the snapshot won't be started if there is another snapshot already running. Does
+   * <b>not</b> check to see if another snapshot of the same name already exists.
+   * @param snapshot description of the snapshot to take. Modified to be {@link Type#DISABLED}.
+   * @throws HBaseSnapshotException if the snapshot could not be started
+   */
+  public synchronized void snapshotDisabledTable(SnapshotDescription snapshot)
+      throws HBaseSnapshotException {
+    // setup the snapshot
+    prepareToTakeSnapshot(snapshot);
+
+    // set the snapshot to be a disabled snapshot, since the client doesn't know about that
+    snapshot = snapshot.toBuilder().setType(Type.DISABLED).build();
+
+    DisabledTableSnapshotHandler handler;
+    try {
+      handler = new DisabledTableSnapshotHandler(snapshot, this.master, this.master);
+      this.handler = handler;
+      this.pool.submit(handler);
+    } catch (IOException e) {
+      // cleanup the working directory
+      Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir);
+      try {
+        if (this.master.getMasterFileSystem().getFileSystem().delete(workingDir, true)) {
+          LOG.error("Couldn't delete working directory (" + workingDir + " for snapshot:"
+              + snapshot);
+        }
+      } catch (IOException e1) {
+        LOG.error("Couldn't delete working directory (" + workingDir + " for snapshot:" + snapshot);
+      }
+      // fail the snapshot
+      throw new SnapshotCreationException("Could not build snapshot handler", e, snapshot);
+    }
+  }
+
+  /**
+   * @return the current handler for the snapshot
+   */
+  public SnapshotSentinel getCurrentSnapshotSentinel() {
+    return this.handler;
+  }
+
+  @Override
+  public void stop(String why) {
+    // short circuit
+    if (this.stopped) return;
+    // make sure we get stop
+    this.stopped = true;
+    // pass the stop onto all the listeners
+    if (this.handler != null) this.handler.stop(why);
+  }
+
+  @Override
+  public boolean isStopped() {
+    return this.stopped;
+  }
+
+  /**
+   * Set the handler for the current snapshot
+   * <p>
+   * Exposed for TESTING
+   * @param handler handler the master should use
+   */
+  public void setSnapshotHandlerForTesting(SnapshotSentinel handler) {
+    this.handler = handler;
+  }
+}
\ No newline at end of file



Mime
View raw message