hbase-commits mailing list archives

From chenh...@apache.org
Subject [1/3] hbase git commit: HBASE-15406 Split / merge switch left disabled after early termination of hbck
Date Fri, 15 Apr 2016 07:36:06 GMT
Repository: hbase
Updated Branches:
  refs/heads/branch-1 d37897535 -> 96e9c466d


http://git-wip-us.apache.org/repos/asf/hbase/blob/96e9c466/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
index 09479c4..b0a844a 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
@@ -9725,6 +9725,540 @@ public final class ZooKeeperProtos {
     // @@protoc_insertion_point(class_scope:hbase.pb.SwitchState)
   }
 
+  public interface SplitAndMergeStateOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // optional bool split_enabled = 1;
+    /**
+     * <code>optional bool split_enabled = 1;</code>
+     */
+    boolean hasSplitEnabled();
+    /**
+     * <code>optional bool split_enabled = 1;</code>
+     */
+    boolean getSplitEnabled();
+
+    // optional bool merge_enabled = 2;
+    /**
+     * <code>optional bool merge_enabled = 2;</code>
+     */
+    boolean hasMergeEnabled();
+    /**
+     * <code>optional bool merge_enabled = 2;</code>
+     */
+    boolean getMergeEnabled();
+  }
+  /**
+   * Protobuf type {@code hbase.pb.SplitAndMergeState}
+   *
+   * <pre>
+   **
+   * State for split and merge, used in hbck
+   * </pre>
+   */
+  public static final class SplitAndMergeState extends
+      com.google.protobuf.GeneratedMessage
+      implements SplitAndMergeStateOrBuilder {
+    // Use SplitAndMergeState.newBuilder() to construct.
+    private SplitAndMergeState(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private SplitAndMergeState(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final SplitAndMergeState defaultInstance;
+    public static SplitAndMergeState getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public SplitAndMergeState getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private SplitAndMergeState(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      int mutable_bitField0_ = 0;
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 8: {
+              bitField0_ |= 0x00000001;
+              splitEnabled_ = input.readBool();
+              break;
+            }
+            case 16: {
+              bitField0_ |= 0x00000002;
+              mergeEnabled_ = input.readBool();
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SplitAndMergeState_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SplitAndMergeState_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<SplitAndMergeState> PARSER =
+        new com.google.protobuf.AbstractParser<SplitAndMergeState>() {
+      public SplitAndMergeState parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new SplitAndMergeState(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<SplitAndMergeState> getParserForType() {
+      return PARSER;
+    }
+
+    private int bitField0_;
+    // optional bool split_enabled = 1;
+    public static final int SPLIT_ENABLED_FIELD_NUMBER = 1;
+    private boolean splitEnabled_;
+    /**
+     * <code>optional bool split_enabled = 1;</code>
+     */
+    public boolean hasSplitEnabled() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>optional bool split_enabled = 1;</code>
+     */
+    public boolean getSplitEnabled() {
+      return splitEnabled_;
+    }
+
+    // optional bool merge_enabled = 2;
+    public static final int MERGE_ENABLED_FIELD_NUMBER = 2;
+    private boolean mergeEnabled_;
+    /**
+     * <code>optional bool merge_enabled = 2;</code>
+     */
+    public boolean hasMergeEnabled() {
+      return ((bitField0_ & 0x00000002) == 0x00000002);
+    }
+    /**
+     * <code>optional bool merge_enabled = 2;</code>
+     */
+    public boolean getMergeEnabled() {
+      return mergeEnabled_;
+    }
+
+    private void initFields() {
+      splitEnabled_ = false;
+      mergeEnabled_ = false;
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeBool(1, splitEnabled_);
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        output.writeBool(2, mergeEnabled_);
+      }
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBoolSize(1, splitEnabled_);
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBoolSize(2, mergeEnabled_);
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState) obj;
+
+      boolean result = true;
+      result = result && (hasSplitEnabled() == other.hasSplitEnabled());
+      if (hasSplitEnabled()) {
+        result = result && (getSplitEnabled()
+            == other.getSplitEnabled());
+      }
+      result = result && (hasMergeEnabled() == other.hasMergeEnabled());
+      if (hasMergeEnabled()) {
+        result = result && (getMergeEnabled()
+            == other.getMergeEnabled());
+      }
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    private int memoizedHashCode = 0;
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasSplitEnabled()) {
+        hash = (37 * hash) + SPLIT_ENABLED_FIELD_NUMBER;
+        hash = (53 * hash) + hashBoolean(getSplitEnabled());
+      }
+      if (hasMergeEnabled()) {
+        hash = (37 * hash) + MERGE_ENABLED_FIELD_NUMBER;
+        hash = (53 * hash) + hashBoolean(getMergeEnabled());
+      }
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code hbase.pb.SplitAndMergeState}
+     *
+     * <pre>
+     **
+     * State for split and merge, used in hbck
+     * </pre>
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeStateOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SplitAndMergeState_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SplitAndMergeState_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        splitEnabled_ = false;
+        bitField0_ = (bitField0_ & ~0x00000001);
+        mergeEnabled_ = false;
+        bitField0_ = (bitField0_ & ~0x00000002);
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SplitAndMergeState_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState build() {
+        org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState buildPartial() {
+        org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.splitEnabled_ = splitEnabled_;
+        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+          to_bitField0_ |= 0x00000002;
+        }
+        result.mergeEnabled_ = mergeEnabled_;
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState) {
+          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState other) {
+        if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState.getDefaultInstance()) return this;
+        if (other.hasSplitEnabled()) {
+          setSplitEnabled(other.getSplitEnabled());
+        }
+        if (other.hasMergeEnabled()) {
+          setMergeEnabled(other.getMergeEnabled());
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      // optional bool split_enabled = 1;
+      private boolean splitEnabled_ ;
+      /**
+       * <code>optional bool split_enabled = 1;</code>
+       */
+      public boolean hasSplitEnabled() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>optional bool split_enabled = 1;</code>
+       */
+      public boolean getSplitEnabled() {
+        return splitEnabled_;
+      }
+      /**
+       * <code>optional bool split_enabled = 1;</code>
+       */
+      public Builder setSplitEnabled(boolean value) {
+        bitField0_ |= 0x00000001;
+        splitEnabled_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional bool split_enabled = 1;</code>
+       */
+      public Builder clearSplitEnabled() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        splitEnabled_ = false;
+        onChanged();
+        return this;
+      }
+
+      // optional bool merge_enabled = 2;
+      private boolean mergeEnabled_ ;
+      /**
+       * <code>optional bool merge_enabled = 2;</code>
+       */
+      public boolean hasMergeEnabled() {
+        return ((bitField0_ & 0x00000002) == 0x00000002);
+      }
+      /**
+       * <code>optional bool merge_enabled = 2;</code>
+       */
+      public boolean getMergeEnabled() {
+        return mergeEnabled_;
+      }
+      /**
+       * <code>optional bool merge_enabled = 2;</code>
+       */
+      public Builder setMergeEnabled(boolean value) {
+        bitField0_ |= 0x00000002;
+        mergeEnabled_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional bool merge_enabled = 2;</code>
+       */
+      public Builder clearMergeEnabled() {
+        bitField0_ = (bitField0_ & ~0x00000002);
+        mergeEnabled_ = false;
+        onChanged();
+        return this;
+      }
+
+      // @@protoc_insertion_point(builder_scope:hbase.pb.SplitAndMergeState)
+    }
+
+    static {
+      defaultInstance = new SplitAndMergeState(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:hbase.pb.SplitAndMergeState)
+  }
+
   private static com.google.protobuf.Descriptors.Descriptor
     internal_static_hbase_pb_MetaRegionServer_descriptor;
   private static
@@ -9785,6 +10319,11 @@ public final class ZooKeeperProtos {
   private static
     com.google.protobuf.GeneratedMessage.FieldAccessorTable
       internal_static_hbase_pb_SwitchState_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_hbase_pb_SplitAndMergeState_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_hbase_pb_SplitAndMergeState_fieldAccessorTable;
 
   public static com.google.protobuf.Descriptors.FileDescriptor
       getDescriptor() {
@@ -9829,9 +10368,11 @@ public final class ZooKeeperProtos {
       "\n\nlock_owner\030\002 \001(\0132\024.hbase.pb.ServerName" +
       "\022\021\n\tthread_id\030\003 \001(\003\022\021\n\tis_shared\030\004 \001(\010\022\017" +
       "\n\007purpose\030\005 \001(\t\022\023\n\013create_time\030\006 \001(\003\"\036\n\013" +
-      "SwitchState\022\017\n\007enabled\030\001 \001(\010BE\n*org.apac" +
-      "he.hadoop.hbase.protobuf.generatedB\017ZooK" +
-      "eeperProtosH\001\210\001\001\240\001\001"
+      "SwitchState\022\017\n\007enabled\030\001 \001(\010\"B\n\022SplitAnd" +
+      "MergeState\022\025\n\rsplit_enabled\030\001 \001(\010\022\025\n\rmer" +
+      "ge_enabled\030\002 \001(\010BE\n*org.apache.hadoop.hb" +
+      "ase.protobuf.generatedB\017ZooKeeperProtosH" +
+      "\001\210\001\001\240\001\001"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -9910,6 +10451,12 @@ public final class ZooKeeperProtos {
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_SwitchState_descriptor,
               new java.lang.String[] { "Enabled", });
+          internal_static_hbase_pb_SplitAndMergeState_descriptor =
+            getDescriptor().getMessageTypes().get(12);
+          internal_static_hbase_pb_SplitAndMergeState_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_hbase_pb_SplitAndMergeState_descriptor,
+              new java.lang.String[] { "SplitEnabled", "MergeEnabled", });
           return null;
         }
       };

http://git-wip-us.apache.org/repos/asf/hbase/blob/96e9c466/hbase-protocol/src/main/protobuf/Master.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/protobuf/Master.proto b/hbase-protocol/src/main/protobuf/Master.proto
index 3fb09f0..a335ec7 100644
--- a/hbase-protocol/src/main/protobuf/Master.proto
+++ b/hbase-protocol/src/main/protobuf/Master.proto
@@ -280,6 +280,7 @@ message SetSplitOrMergeEnabledRequest {
   required bool enabled = 1;
   optional bool synchronous = 2;
   repeated MasterSwitchType switch_types = 3;
+  optional bool skip_lock = 4;
 }
 
 message SetSplitOrMergeEnabledResponse {
@@ -294,6 +295,12 @@ message IsSplitOrMergeEnabledResponse {
   required bool enabled = 1;
 }
 
+message ReleaseSplitOrMergeLockAndRollbackRequest {
+}
+
+message ReleaseSplitOrMergeLockAndRollbackResponse {
+}
+
 message NormalizeRequest {
 }
 
@@ -653,6 +660,12 @@ service MasterService {
     returns(IsSplitOrMergeEnabledResponse);
 
   /**
+   * Release the switch lock and roll the split/merge switches back to their saved state.
+   */
+  rpc ReleaseSplitOrMergeLockAndRollback(ReleaseSplitOrMergeLockAndRollbackRequest)
+    returns(ReleaseSplitOrMergeLockAndRollbackResponse);
+
+  /**
    * Run region normalizer. Can NOT run for various reasons. Check logs.
    */
   rpc Normalize(NormalizeRequest)
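
A minimal usage sketch, assuming the Admin-side counterparts of the RPCs above
(setSplitOrMergeEnabled with the new skip_lock argument, and
releaseSplitOrMergeLockAndRollback, both exercised by the hbck and test code
further down); the try/finally shape is an assumed pattern, not code from this
patch. Ordinary switch flips pass skipLock=true, which only verifies that no
hbck instance currently holds the lock:

    import org.apache.hadoop.hbase.client.Admin;

    public class SwitchLockUsage {
      static void repairWithSwitchesDisabled(Admin admin) throws Exception {
        // skipLock=false (3rd argument): create the lock znode and snapshot
        // the current split/merge switch values before disabling them
        admin.setSplitOrMergeEnabled(false, false, false,
            Admin.MasterSwitchType.SPLIT, Admin.MasterSwitchType.MERGE);
        try {
          // ... repair work that must not race with splits or merges ...
        } finally {
          // Restore the snapshot and delete the lock; if the process dies
          // before this runs, a later ReleaseSplitOrMergeLockAndRollback
          // call cleans up the stale lock and state
          admin.releaseSplitOrMergeLockAndRollback();
        }
      }
    }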

http://git-wip-us.apache.org/repos/asf/hbase/blob/96e9c466/hbase-protocol/src/main/protobuf/ZooKeeper.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/protobuf/ZooKeeper.proto b/hbase-protocol/src/main/protobuf/ZooKeeper.proto
index b408db9..4109481 100644
--- a/hbase-protocol/src/main/protobuf/ZooKeeper.proto
+++ b/hbase-protocol/src/main/protobuf/ZooKeeper.proto
@@ -173,4 +173,12 @@ message TableLock {
  */
 message SwitchState {
   optional bool enabled = 1;
-}
\ No newline at end of file
+}
+
+/**
+ * State for split and merge, used in hbck
+ */
+message SplitAndMergeState {
+  optional bool split_enabled = 1;
+  optional bool merge_enabled = 2;
+}
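
For reference, a minimal round-trip of the new message through the PB-magic
framing that SplitOrMergeTracker uses below (the ProtobufUtil calls are the
ones in this patch; the harness class itself is illustrative):

    import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
    import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;

    public class SplitAndMergeStateRoundTrip {
      public static void main(String[] args) throws Exception {
        // Serialize: what saveOriginalState() writes to the state znode
        byte[] bytes = ProtobufUtil.prependPBMagic(
            ZooKeeperProtos.SplitAndMergeState.newBuilder()
                .setSplitEnabled(true)
                .setMergeEnabled(false)
                .build().toByteArray());

        // Deserialize: what rollback() does after reading the znode back
        ProtobufUtil.expectPBMagicPrefix(bytes);
        int magicLen = ProtobufUtil.lengthOfPBMagic();
        ZooKeeperProtos.SplitAndMergeState.Builder builder =
            ZooKeeperProtos.SplitAndMergeState.newBuilder();
        ProtobufUtil.mergeFrom(builder, bytes, magicLen, bytes.length - magicLen);

        ZooKeeperProtos.SplitAndMergeState state = builder.build();
        assert state.getSplitEnabled() && !state.getMergeEnabled();
      }
    }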

http://git-wip-us.apache.org/repos/asf/hbase/blob/96e9c466/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index c7bf4af..90471cf 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -28,6 +28,7 @@ import java.util.Set;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.CoordinatedStateException;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
@@ -43,6 +44,7 @@ import org.apache.hadoop.hbase.UnknownRegionException;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.errorhandling.ForeignException;
+import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.exceptions.MergeRegionException;
 import org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
 import org.apache.hadoop.hbase.ipc.PriorityFunction;
@@ -1476,6 +1478,10 @@ public class MasterRpcServices extends RSRpcServices
     try {
       master.checkInitialized();
       boolean newValue = request.getEnabled();
+      boolean skipLock = request.getSkipLock();
+      if (!master.getSplitOrMergeTracker().lock(skipLock)) {
+        throw new DoNotRetryIOException("Can't set the splitOrMerge switch: the switch lock is held");
+      }
       for (MasterProtos.MasterSwitchType masterSwitchType : request.getSwitchTypesList()) {
         Admin.MasterSwitchType switchType = convert(masterSwitchType);
         boolean oldValue = master.isSplitOrMergeEnabled(switchType);
@@ -1509,6 +1515,24 @@ public class MasterRpcServices extends RSRpcServices
   }
 
   @Override
+  public MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse
+  releaseSplitOrMergeLockAndRollback(RpcController controller,
+    MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest request) throws ServiceException {
+    try {
+      master.getSplitOrMergeTracker().releaseLockAndRollback();
+    } catch (KeeperException e) {
+      throw new ServiceException(e);
+    } catch (DeserializationException e) {
+      throw new ServiceException(e);
+    } catch (InterruptedException e) {
+      throw new ServiceException(e);
+    }
+    MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse.Builder builder =
+      MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse.newBuilder();
+    return builder.build();
+  }
+
+  @Override
   public NormalizeResponse normalize(RpcController controller,
       NormalizeRequest request) throws ServiceException {
     try {

http://git-wip-us.apache.org/repos/asf/hbase/blob/96e9c466/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index 2473d0a..4596b5b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -17,6 +17,16 @@
  */
 package org.apache.hadoop.hbase.util;
 
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Joiner;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Multimap;
+import com.google.common.collect.Ordering;
+import com.google.common.collect.TreeMultimap;
+import com.google.protobuf.ServiceException;
+
 import java.io.Closeable;
 import java.io.FileNotFoundException;
 import java.io.IOException;
@@ -136,15 +146,6 @@ import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 import org.apache.zookeeper.KeeperException;
 
-import com.google.common.base.Joiner;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Multimap;
-import com.google.common.collect.Ordering;
-import com.google.common.collect.TreeMultimap;
-import com.google.protobuf.ServiceException;
-
 /**
  * HBaseFsck (hbck) is a tool for checking and repairing region consistency and
  * table integrity problems in a corrupted HBase.
@@ -308,6 +309,7 @@ public class HBaseFsck extends Configured implements Closeable {
 
   private Map<TableName, Set<String>> skippedRegions = new HashMap<TableName, Set<String>>();
 
+  ZooKeeperWatcher zkw = null;
   /**
    * List of orphaned table ZNodes
    */
@@ -353,6 +355,7 @@ public class HBaseFsck extends Configured implements Closeable {
         "hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),
       getConf().getInt(
         "hbase.hbck.lockfile.attempt.maxsleeptime", DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));
+    zkw = createZooKeeperWatcher();
   }
 
   private class FileLockCallable implements Callable<FSDataOutputStream> {
@@ -694,7 +697,8 @@ public class HBaseFsck extends Configured implements Closeable {
     }
     boolean[] oldSplitAndMerge = null;
     if (shouldDisableSplitAndMerge()) {
-      oldSplitAndMerge = admin.setSplitOrMergeEnabled(false, false,
+      admin.releaseSplitOrMergeLockAndRollback();
+      oldSplitAndMerge = admin.setSplitOrMergeEnabled(false, false, false,
         Admin.MasterSwitchType.SPLIT, Admin.MasterSwitchType.MERGE);
     }
 
@@ -711,14 +715,7 @@ public class HBaseFsck extends Configured implements Closeable {
 
       if (shouldDisableSplitAndMerge()) {
         if (oldSplitAndMerge != null) {
-          if (oldSplitAndMerge[0] && oldSplitAndMerge[1]) {
-            admin.setSplitOrMergeEnabled(true, false,
-              Admin.MasterSwitchType.SPLIT, Admin.MasterSwitchType.MERGE);
-          } else if (oldSplitAndMerge[0]) {
-            admin.setSplitOrMergeEnabled(true, false, Admin.MasterSwitchType.SPLIT);
-          } else if (oldSplitAndMerge[1]) {
-            admin.setSplitOrMergeEnabled(true, false, Admin.MasterSwitchType.MERGE);
-          }
+          admin.releaseSplitOrMergeLockAndRollback();
         }
       }
     }
@@ -755,6 +752,10 @@ public class HBaseFsck extends Configured implements Closeable {
 
   @Override
   public void close() throws IOException {
+    if (zkw != null) {
+      zkw.close();
+      zkw = null;
+    }
     IOUtils.closeQuietly(admin);
     IOUtils.closeQuietly(meta);
     IOUtils.closeQuietly(connection);
@@ -1652,7 +1653,6 @@ public class HBaseFsck extends Configured implements Closeable {
     HConnectionManager.execute(new HConnectable<Void>(getConf()) {
       @Override
       public Void connect(HConnection connection) throws IOException {
-        ZooKeeperWatcher zkw = createZooKeeperWatcher();
         try {
           for (TableName tableName :
               ZKTableStateClientSideReader.getDisabledOrDisablingTables(zkw)) {
@@ -1662,8 +1662,6 @@ public class HBaseFsck extends Configured implements Closeable {
           throw new IOException(ke);
         } catch (InterruptedException e) {
           throw new InterruptedIOException();
-        } finally {
-          zkw.close();
         }
         return null;
       }
@@ -1786,14 +1784,7 @@ public class HBaseFsck extends Configured implements Closeable {
 
   private ServerName getMetaRegionServerName(int replicaId)
   throws IOException, KeeperException {
-    ZooKeeperWatcher zkw = createZooKeeperWatcher();
-    ServerName sn = null;
-    try {
-      sn = new MetaTableLocator().getMetaRegionLocation(zkw, replicaId);
-    } finally {
-      zkw.close();
-    }
-    return sn;
+    return new MetaTableLocator().getMetaRegionLocation(zkw, replicaId);
   }
 
   /**
@@ -3239,32 +3230,21 @@ public class HBaseFsck extends Configured implements Closeable {
   }
 
   private void checkAndFixTableLocks() throws IOException {
-    ZooKeeperWatcher zkw = createZooKeeperWatcher();
+    TableLockChecker checker = new TableLockChecker(zkw, errors);
+    checker.checkTableLocks();
 
-    try {
-      TableLockChecker checker = new TableLockChecker(zkw, errors);
-      checker.checkTableLocks();
-
-      if (this.fixTableLocks) {
-        checker.fixExpiredTableLocks();
-      }
-    } finally {
-      zkw.close();
+    if (this.fixTableLocks) {
+      checker.fixExpiredTableLocks();
     }
   }
 
   private void checkAndFixReplication() throws IOException {
-    ZooKeeperWatcher zkw = createZooKeeperWatcher();
-    try {
-      ReplicationChecker checker = new ReplicationChecker(getConf(), zkw, connection, errors);
-      checker.checkUnDeletedQueues();
+    ReplicationChecker checker = new ReplicationChecker(getConf(), zkw, connection, errors);
+    checker.checkUnDeletedQueues();
 
-      if (checker.hasUnDeletedQueues() && this.fixReplication) {
-        checker.fixUnDeletedQueues();
-        setShouldRerun();
-      }
-    } finally {
-      zkw.close();
+    if (checker.hasUnDeletedQueues() && this.fixReplication) {
+      checker.fixUnDeletedQueues();
+      setShouldRerun();
     }
   }
 
@@ -3276,47 +3256,41 @@ public class HBaseFsck extends Configured implements Closeable {
    */
   private void checkAndFixOrphanedTableZNodes()
       throws IOException, KeeperException, InterruptedException {
-    ZooKeeperWatcher zkw = createZooKeeperWatcher();
-
-    try {
-      Set<TableName> enablingTables = ZKTableStateClientSideReader.getEnablingTables(zkw);
-      String msg;
-      TableInfo tableInfo;
-
-      for (TableName tableName : enablingTables) {
-        // Check whether the table exists in hbase
-        tableInfo = tablesInfo.get(tableName);
-        if (tableInfo != null) {
-          // Table exists.  This table state is in transit.  No problem for this table.
-          continue;
-        }
-
-        msg = "Table " + tableName + " not found in hbase:meta. Orphaned table ZNode found.";
-        LOG.warn(msg);
-        orphanedTableZNodes.add(tableName);
-        errors.reportError(ERROR_CODE.ORPHANED_ZK_TABLE_ENTRY, msg);
+    Set<TableName> enablingTables = ZKTableStateClientSideReader.getEnablingTables(zkw);
+    String msg;
+    TableInfo tableInfo;
+
+    for (TableName tableName : enablingTables) {
+      // Check whether the table exists in hbase
+      tableInfo = tablesInfo.get(tableName);
+      if (tableInfo != null) {
+        // Table exists.  This table state is in transit.  No problem for this table.
+        continue;
       }
 
-      if (orphanedTableZNodes.size() > 0 && this.fixTableZNodes) {
-        ZKTableStateManager zkTableStateMgr = new ZKTableStateManager(zkw);
+      msg = "Table " + tableName + " not found in hbase:meta. Orphaned table ZNode found.";
+      LOG.warn(msg);
+      orphanedTableZNodes.add(tableName);
+      errors.reportError(ERROR_CODE.ORPHANED_ZK_TABLE_ENTRY, msg);
+    }
 
-        for (TableName tableName : orphanedTableZNodes) {
-          try {
-            // Set the table state to be disabled so that if we made mistake, we can trace
-            // the history and figure it out.
-            // Another choice is to call checkAndRemoveTableState() to delete the orphaned ZNode.
-            // Both approaches works.
-            zkTableStateMgr.setTableState(tableName, ZooKeeperProtos.Table.State.DISABLED);
-          } catch (CoordinatedStateException e) {
-            // This exception should not happen here
-            LOG.error(
-              "Got a CoordinatedStateException while fixing the ENABLING table znode " + tableName,
-              e);
-          }
+    if (orphanedTableZNodes.size() > 0 && this.fixTableZNodes) {
+      ZKTableStateManager zkTableStateMgr = new ZKTableStateManager(zkw);
+
+      for (TableName tableName : orphanedTableZNodes) {
+        try {
+          // Set the table state to be disabled so that if we made mistake, we can trace
+          // the history and figure it out.
+          // Another choice is to call checkAndRemoveTableState() to delete the orphaned ZNode.
+          // Both approaches works.
+          zkTableStateMgr.setTableState(tableName, ZooKeeperProtos.Table.State.DISABLED);
+        } catch (CoordinatedStateException e) {
+          // This exception should not happen here
+          LOG.error(
+            "Got a CoordinatedStateException while fixing the ENABLING table znode " + tableName,
+            e);
         }
       }
-    } finally {
-      zkw.close();
     }
   }
 
@@ -3386,12 +3360,7 @@ public class HBaseFsck extends Configured implements Closeable {
   private void unassignMetaReplica(HbckInfo hi) throws IOException, InterruptedException,
   KeeperException {
     undeployRegions(hi);
-    ZooKeeperWatcher zkw = createZooKeeperWatcher();
-    try {
-      ZKUtil.deleteNode(zkw, zkw.getZNodeForReplica(hi.metaEntry.getReplicaId()));
-    } finally {
-      zkw.close();
-    }
+    ZKUtil.deleteNode(zkw, zkw.getZNodeForReplica(hi.metaEntry.getReplicaId()));
   }
 
   private void assignMetaReplica(int replicaId)
@@ -4223,7 +4192,12 @@ public class HBaseFsck extends Configured implements Closeable {
    * Disable the split and merge
    */
   public static void setDisableSplitAndMerge() {
-    disableSplitAndMerge = true;
+    setDisableSplitAndMerge(true);
+  }
+
+  @VisibleForTesting
+  public static void setDisableSplitAndMerge(boolean flag) {
+    disableSplitAndMerge = flag;
   }
 
   /**
@@ -4243,7 +4217,7 @@ public class HBaseFsck extends Configured implements Closeable {
   public boolean shouldDisableSplitAndMerge() {
     return fixAny || disableSplitAndMerge;
   }
-
+  
   /**
    * Set summary mode.
    * Print only summary of the tables and status (OK or INCONSISTENT)

http://git-wip-us.apache.org/repos/asf/hbase/blob/96e9c466/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/SplitOrMergeTracker.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/SplitOrMergeTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/SplitOrMergeTracker.java
index 0d729a1..e548245 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/SplitOrMergeTracker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/SplitOrMergeTracker.java
@@ -18,6 +18,8 @@
 package org.apache.hadoop.hbase.zookeeper;
 
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
@@ -25,6 +27,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.zookeeper.KeeperException;
@@ -37,8 +40,13 @@ import org.apache.zookeeper.KeeperException;
 @InterfaceAudience.Private
 public class SplitOrMergeTracker {
 
+  public static final String LOCK = "splitOrMergeLock";
+  public static final String STATE = "splitOrMergeState";
+
   private String splitZnode;
   private String mergeZnode;
+  private String splitOrMergeLock;
+  private ZooKeeperWatcher watcher;
 
   private SwitchStateTracker splitStateTracker;
   private SwitchStateTracker mergeStateTracker;
@@ -49,6 +57,9 @@ public class SplitOrMergeTracker {
       if (ZKUtil.checkExists(watcher, watcher.getSwitchZNode()) < 0) {
         ZKUtil.createAndFailSilent(watcher, watcher.getSwitchZNode());
       }
+      if (ZKUtil.checkExists(watcher, watcher.getSwitchLockZNode()) < 0) {
+        ZKUtil.createAndFailSilent(watcher, watcher.getSwitchLockZNode());
+      }
     } catch (KeeperException e) {
       throw new RuntimeException(e);
     }
@@ -56,8 +67,12 @@ public class SplitOrMergeTracker {
       conf.get("zookeeper.znode.switch.split", "split"));
     mergeZnode = ZKUtil.joinZNode(watcher.getSwitchZNode(),
       conf.get("zookeeper.znode.switch.merge", "merge"));
+
+    splitOrMergeLock = ZKUtil.joinZNode(watcher.getSwitchLockZNode(), LOCK);
+
     splitStateTracker = new SwitchStateTracker(watcher, splitZnode, abortable);
     mergeStateTracker = new SwitchStateTracker(watcher, mergeZnode, abortable);
+    this.watcher = watcher;
   }
 
   public void start() {
@@ -91,6 +106,76 @@ public class SplitOrMergeTracker {
     }
   }
 
+  /**
+   * Roll back to the original switch state and delete the lock node.
+   */
+  public void releaseLockAndRollback()
+    throws KeeperException, DeserializationException, InterruptedException {
+    if (ZKUtil.checkExists(watcher, splitOrMergeLock) != -1) {
+      List<ZKUtil.ZKUtilOp> ops = new ArrayList<>();
+      rollback(ops);
+      ops.add(ZKUtil.ZKUtilOp.deleteNodeFailSilent(splitOrMergeLock));
+      ZKUtil.multiOrSequential(watcher, ops, false);
+    }
+  }
+
+  // If an old switch state is stored on zk, roll the switches back to it
+  private void rollback(List<ZKUtil.ZKUtilOp> ops)
+    throws KeeperException, InterruptedException, DeserializationException {
+    String splitOrMergeState = ZKUtil.joinZNode(watcher.getSwitchLockZNode(),
+      SplitOrMergeTracker.STATE);
+    if (ZKUtil.checkExists(watcher, splitOrMergeState) != -1) {
+      byte[] bytes = ZKUtil.getData(watcher, splitOrMergeState);
+      ProtobufUtil.expectPBMagicPrefix(bytes);
+      ZooKeeperProtos.SplitAndMergeState.Builder builder =
+        ZooKeeperProtos.SplitAndMergeState.newBuilder();
+      try {
+        int magicLen = ProtobufUtil.lengthOfPBMagic();
+        ProtobufUtil.mergeFrom(builder, bytes, magicLen, bytes.length - magicLen);
+      } catch (IOException e) {
+        throw new DeserializationException(e);
+      }
+      ZooKeeperProtos.SplitAndMergeState splitAndMergeState = builder.build();
+      // Restore the saved values with get*(), not has*(): saveOriginalState()
+      // always sets both fields, so has*() would be true unconditionally.
+      splitStateTracker.setSwitchEnabled(splitAndMergeState.getSplitEnabled());
+      mergeStateTracker.setSwitchEnabled(splitAndMergeState.getMergeEnabled());
+      ops.add(ZKUtil.ZKUtilOp.deleteNodeFailSilent(splitOrMergeState));
+    }
+  }
+
+  /**
+   *  Acquire the lock if nobody else holds it. Once the lock node is created
+   *  on zk, the original splitOrMerge switch values are saved there as well.
+   *  @param skipLock if true, skip creating the lock node, but still check
+   *                  that no lock currently exists
+   *  @return true if the lock was acquired (or the check passed with
+   *          skipLock), false otherwise
+   */
+  public boolean lock(boolean skipLock) throws KeeperException {
+    if (ZKUtil.checkExists(watcher, splitOrMergeLock) != -1) {
+      return false;
+    }
+    if (skipLock) {
+      return true;
+    }
+    ZKUtil.createAndFailSilent(watcher, splitOrMergeLock);
+    if (ZKUtil.checkExists(watcher, splitOrMergeLock) != -1) {
+      saveOriginalState();
+      return true;
+    }
+    return false;
+  }
+
+  private void saveOriginalState() throws KeeperException {
+    boolean splitEnabled = isSplitOrMergeEnabled(Admin.MasterSwitchType.SPLIT);
+    boolean mergeEnabled = isSplitOrMergeEnabled(Admin.MasterSwitchType.MERGE);
+    String splitOrMergeStates = ZKUtil.joinZNode(watcher.getSwitchLockZNode(),
+      SplitOrMergeTracker.STATE);
+    ZooKeeperProtos.SplitAndMergeState.Builder builder
+      = ZooKeeperProtos.SplitAndMergeState.newBuilder();
+    builder.setSplitEnabled(splitEnabled);
+    builder.setMergeEnabled(mergeEnabled);
+    ZKUtil.createSetData(watcher, splitOrMergeStates,
+      ProtobufUtil.prependPBMagic(builder.build().toByteArray()));
+  }
+
   private static class SwitchStateTracker extends ZooKeeperNodeTracker {
 
     public SwitchStateTracker(ZooKeeperWatcher watcher, String node, Abortable abortable) {
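
One point worth illustrating about the rollback above: on the generated
message, has*() only reports whether a field was set, while get*() carries
the remembered switch value, and saveOriginalState() always sets both fields.
A standalone sketch (names from the generated code; the harness is
illustrative, run with -ea for the asserts):

    import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;

    public class HasVersusGet {
      public static void main(String[] args) {
        ZooKeeperProtos.SplitAndMergeState state =
            ZooKeeperProtos.SplitAndMergeState.newBuilder()
                .setSplitEnabled(false)  // splits were disabled before locking
                .setMergeEnabled(true)   // merges were enabled
                .build();

        assert state.hasSplitEnabled();   // true: the field is present
        assert !state.getSplitEnabled();  // false: the remembered value
        assert state.hasMergeEnabled() && state.getMergeEnabled();
      }
    }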

http://git-wip-us.apache.org/repos/asf/hbase/blob/96e9c466/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSplitOrMergeStatus.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSplitOrMergeStatus.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSplitOrMergeStatus.java
index 477be1e..f8b2877 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSplitOrMergeStatus.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSplitOrMergeStatus.java
@@ -41,6 +41,7 @@ import java.util.List;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 @Category({MediumTests.class, ClientTests.class})
 public class TestSplitOrMergeStatus {
@@ -77,14 +78,15 @@ public class TestSplitOrMergeStatus {
 
     Admin admin = TEST_UTIL.getHBaseAdmin();
     initSwitchStatus(admin);
-    boolean[] results = admin.setSplitOrMergeEnabled(false, false, Admin.MasterSwitchType.SPLIT);
+    boolean[] results = admin.setSplitOrMergeEnabled(false, false,
+      true, Admin.MasterSwitchType.SPLIT);
     assertEquals(results.length, 1);
     assertTrue(results[0]);
     admin.split(t.getName());
     int count = waitOnSplitOrMerge(t).size();
     assertTrue(orignalCount == count);
 
-    results = admin.setSplitOrMergeEnabled(true, false, Admin.MasterSwitchType.SPLIT);
+    results = admin.setSplitOrMergeEnabled(true, false, true, Admin.MasterSwitchType.SPLIT);
     assertEquals(results.length, 1);
     assertFalse(results[0]);
     admin.split(t.getName());
@@ -109,7 +111,8 @@ public class TestSplitOrMergeStatus {
 
     waitForMergable(admin, name);
     int orignalCount = locator.getAllRegionLocations().size();
-    boolean[] results = admin.setSplitOrMergeEnabled(false, false, Admin.MasterSwitchType.MERGE);
+    boolean[] results = admin.setSplitOrMergeEnabled(false, false,
+      true, Admin.MasterSwitchType.MERGE);
     assertEquals(results.length, 1);
     assertTrue(results[0]);
     List<HRegionInfo> regions = admin.getTableRegions(t.getName());
@@ -120,7 +123,7 @@ public class TestSplitOrMergeStatus {
     assertTrue(orignalCount == count);
 
     waitForMergable(admin, name);
-    results = admin.setSplitOrMergeEnabled(true, false, Admin.MasterSwitchType.MERGE);
+    results = admin.setSplitOrMergeEnabled(true, false, true, Admin.MasterSwitchType.MERGE);
     assertEquals(results.length, 1);
     assertFalse(results[0]);
     admin.mergeRegions(regions.get(0).getEncodedNameAsBytes(),
@@ -133,7 +136,7 @@ public class TestSplitOrMergeStatus {
   @Test
   public void testMultiSwitches() throws IOException {
     Admin admin = TEST_UTIL.getHBaseAdmin();
-    boolean[] switches = admin.setSplitOrMergeEnabled(false, false,
+    boolean[] switches = admin.setSplitOrMergeEnabled(false, false, true,
       Admin.MasterSwitchType.SPLIT, Admin.MasterSwitchType.MERGE);
     for (boolean s : switches){
       assertTrue(s);
@@ -143,12 +146,34 @@ public class TestSplitOrMergeStatus {
     admin.close();
   }
 
+  @Test
+  public void testSwitchLock() throws IOException {
+    Admin admin = TEST_UTIL.getHBaseAdmin();
+    admin.setSplitOrMergeEnabled(false, false, false,
+      Admin.MasterSwitchType.SPLIT, Admin.MasterSwitchType.MERGE);
+    try {
+      admin.setSplitOrMergeEnabled(false, false, true,
+        Admin.MasterSwitchType.SPLIT, Admin.MasterSwitchType.MERGE);
+      fail();
+    } catch (IOException e) {
+      LOG.info("Expected: can't flip the switches while the lock is held", e);
+    }
+    admin.releaseSplitOrMergeLockAndRollback();
+    try {
+      admin.setSplitOrMergeEnabled(true, false, true,
+        Admin.MasterSwitchType.SPLIT, Admin.MasterSwitchType.MERGE);
+    } catch (IOException e) {
+      fail();
+    }
+    admin.close();
+  }
+  
   private void initSwitchStatus(Admin admin) throws IOException {
     if (!admin.isSplitOrMergeEnabled(Admin.MasterSwitchType.SPLIT)) {
-      admin.setSplitOrMergeEnabled(true, false, Admin.MasterSwitchType.SPLIT);
+      admin.setSplitOrMergeEnabled(true, false, true, Admin.MasterSwitchType.SPLIT);
     }
     if (!admin.isSplitOrMergeEnabled(Admin.MasterSwitchType.MERGE)) {
-      admin.setSplitOrMergeEnabled(true, false, Admin.MasterSwitchType.MERGE);
+      admin.setSplitOrMergeEnabled(true, false, true, Admin.MasterSwitchType.MERGE);
     }
     assertTrue(admin.isSplitOrMergeEnabled(Admin.MasterSwitchType.SPLIT));
     assertTrue(admin.isSplitOrMergeEnabled(Admin.MasterSwitchType.MERGE));

http://git-wip-us.apache.org/repos/asf/hbase/blob/96e9c466/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
index 7ab4d5d..343f2a9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
@@ -37,8 +37,6 @@ import java.util.HashSet;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
-import java.util.Random;
-import java.util.Map.Entry;
 import java.util.NavigableMap;
 import java.util.Set;
 import java.util.concurrent.Callable;
@@ -84,12 +82,10 @@ import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.MetaScanner;
-import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.RowMutations;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.replication.ReplicationAdmin;
@@ -114,7 +110,6 @@ import org.apache.hadoop.hbase.regionserver.SplitTransactionImpl;
 import org.apache.hadoop.hbase.regionserver.TestEndToEndSplitTransaction;
 import org.apache.hadoop.hbase.replication.ReplicationFactory;
 import org.apache.hadoop.hbase.replication.ReplicationQueues;
-import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter;
 import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter.ERROR_CODE;
 import org.apache.hadoop.hbase.util.HBaseFsck.HbckInfo;
@@ -136,6 +131,10 @@ import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
 
 import com.google.common.collect.Multimap;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.spy;
 
 /**
  * This tests HBaseFsck's ability to detect reasons for inconsistent tables.
@@ -2872,6 +2871,56 @@ public class TestHBaseFsck {
   }
 
 
+  /**
+   * See HBASE-15406
+   */
+  @Test
+  public void testSplitOrMergeStatusWhenHBCKAbort() throws Exception {
+    admin.setSplitOrMergeEnabled(true, false, true,
+        Admin.MasterSwitchType.SPLIT, Admin.MasterSwitchType.MERGE);
+    boolean oldSplit = admin.isSplitOrMergeEnabled(Admin.MasterSwitchType.SPLIT);
+    boolean oldMerge = admin.isSplitOrMergeEnabled(Admin.MasterSwitchType.MERGE);
+
+    assertTrue(oldSplit);
+    assertTrue(oldMerge);
+
+    ExecutorService exec = new ScheduledThreadPoolExecutor(10);
+    HBaseFsck hbck = new HBaseFsck(conf, exec);
+    HBaseFsck.setDisplayFullReport(); // i.e. -details
+    final HBaseFsck spiedHbck = spy(hbck);
+    doAnswer(new Answer() {
+      @Override
+      public Object answer(InvocationOnMock invocation) throws Throwable {
+        // Turn the disable-splitOrMerge flag off inside hbck, so the finally
+        // block will not roll the switches back and they stay disabled,
+        // simulating an early termination of hbck.
+        spiedHbck.setDisableSplitAndMerge(false);
+        return null;
+      }
+    }).when(spiedHbck).onlineConsistencyRepair();
+    spiedHbck.setDisableSplitAndMerge();
+    spiedHbck.connect();
+    spiedHbck.onlineHbck();
+    spiedHbck.close();
+
+    boolean split = admin.isSplitOrMergeEnabled(Admin.MasterSwitchType.SPLIT);
+    boolean merge = admin.isSplitOrMergeEnabled(Admin.MasterSwitchType.MERGE);
+    assertFalse(split);
+    assertFalse(merge);
+
+    // rerun hbck to repair the switches state
+    hbck = new HBaseFsck(conf, exec);
+    hbck.setDisableSplitAndMerge();
+    hbck.connect();
+    hbck.onlineHbck();
+    hbck.close();
+
+    split = admin.isSplitOrMergeEnabled(Admin.MasterSwitchType.SPLIT);
+    merge = admin.isSplitOrMergeEnabled(Admin.MasterSwitchType.MERGE);
+
+    assertTrue(split);
+    assertTrue(merge);
+  }
+
+
   public static class MasterSyncObserver extends BaseMasterObserver {
     volatile CountDownLatch tableCreationLatch = null;
     volatile CountDownLatch tableDeletionLatch = null;

http://git-wip-us.apache.org/repos/asf/hbase/blob/96e9c466/hbase-shell/src/main/ruby/hbase/admin.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb
index a19e30c..89131fb 100644
--- a/hbase-shell/src/main/ruby/hbase/admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/admin.rb
@@ -113,7 +113,7 @@ module Hbase
       end
       @admin.setSplitOrMergeEnabled(
         java.lang.Boolean.valueOf(enabled), java.lang.Boolean.valueOf(false),
-        switch_type)[0]
+        java.lang.Boolean.valueOf(true), switch_type)[0]
     end
 
     #----------------------------------------------------------------------------------------------

