Return-Path: X-Original-To: apmail-hbase-commits-archive@www.apache.org Delivered-To: apmail-hbase-commits-archive@www.apache.org Received: from mail.apache.org (hermes.apache.org [140.211.11.3]) by minotaur.apache.org (Postfix) with SMTP id DE52E188DD for ; Fri, 26 Feb 2016 00:52:59 +0000 (UTC) Received: (qmail 37315 invoked by uid 500); 26 Feb 2016 00:52:59 -0000 Delivered-To: apmail-hbase-commits-archive@hbase.apache.org Received: (qmail 37265 invoked by uid 500); 26 Feb 2016 00:52:59 -0000 Mailing-List: contact commits-help@hbase.apache.org; run by ezmlm Precedence: bulk List-Help: List-Unsubscribe: List-Post: List-Id: Reply-To: dev@hbase.apache.org Delivered-To: mailing list commits@hbase.apache.org Received: (qmail 37255 invoked by uid 99); 26 Feb 2016 00:52:59 -0000 Received: from git1-us-west.apache.org (HELO git1-us-west.apache.org) (140.211.11.23) by apache.org (qpsmtpd/0.29) with ESMTP; Fri, 26 Feb 2016 00:52:59 +0000 Received: by git1-us-west.apache.org (ASF Mail Server at git1-us-west.apache.org, from userid 33) id 765ABE8F2F; Fri, 26 Feb 2016 00:52:59 +0000 (UTC) Content-Type: text/plain; charset="us-ascii" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit From: chenheng@apache.org To: commits@hbase.apache.org Date: Fri, 26 Feb 2016 00:52:59 -0000 Message-Id: <4c5395f4bd274733a386e627af0cb4cd@git.apache.org> X-Mailer: ASF-Git Admin Mailer Subject: [1/4] hbase git commit: Revert "HBASE-15128 Disable region splits and merges switch in master" Repository: hbase Updated Branches: refs/heads/master 24d481c58 -> bf4fcc30c http://git-wip-us.apache.org/repos/asf/hbase/blob/bf4fcc30/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java ---------------------------------------------------------------------- diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java index 0240a67..4371739 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java @@ -8196,450 +8196,6 @@ public final class ZooKeeperProtos { // @@protoc_insertion_point(class_scope:hbase.pb.TableLock) } - public interface SwitchStateOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // optional bool enabled = 1; - /** - * optional bool enabled = 1; - */ - boolean hasEnabled(); - /** - * optional bool enabled = 1; - */ - boolean getEnabled(); - } - /** - * Protobuf type {@code hbase.pb.SwitchState} - * - *
-   **
-   * State of the switch.
-   * </pre>
- */ - public static final class SwitchState extends - com.google.protobuf.GeneratedMessage - implements SwitchStateOrBuilder { - // Use SwitchState.newBuilder() to construct. - private SwitchState(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private SwitchState(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final SwitchState defaultInstance; - public static SwitchState getDefaultInstance() { - return defaultInstance; - } - - public SwitchState getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private SwitchState( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 8: { - bitField0_ |= 0x00000001; - enabled_ = input.readBool(); - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SwitchState_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SwitchState_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public SwitchState parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new SwitchState(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - private int bitField0_; - // optional bool enabled = 1; - public static final int ENABLED_FIELD_NUMBER = 1; - private boolean enabled_; - /** - * optional bool enabled = 1; - */ - public boolean hasEnabled() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * optional bool enabled = 1; - */ - public boolean getEnabled() { - return enabled_; - } - - private void initFields() { - enabled_ = false; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = 
memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBool(1, enabled_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize(1, enabled_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState)) { - return super.equals(obj); - } - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState) obj; - - boolean result = true; - result = result && (hasEnabled() == other.hasEnabled()); - if (hasEnabled()) { - result = result && (getEnabled() - == other.getEnabled()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - private int memoizedHashCode = 0; - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; - } - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasEnabled()) { - hash = (37 * hash) + ENABLED_FIELD_NUMBER; - hash = (53 * hash) + hashBoolean(getEnabled()); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - memoizedHashCode = hash; - return hash; - } - - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws 
java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code hbase.pb.SwitchState} - * - *
-     **
-     * State of the switch.
-     * </pre>
- */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchStateOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SwitchState_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SwitchState_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState.Builder.class); - } - - // Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - enabled_ = false; - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SwitchState_descriptor; - } - - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState.getDefaultInstance(); - } - - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState build() { - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.enabled_ = enabled_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState.getDefaultInstance()) return this; - if (other.hasEnabled()) { - setEnabled(other.getEnabled()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - return true; - } - - public Builder 
mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // optional bool enabled = 1; - private boolean enabled_ ; - /** - * optional bool enabled = 1; - */ - public boolean hasEnabled() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * optional bool enabled = 1; - */ - public boolean getEnabled() { - return enabled_; - } - /** - * optional bool enabled = 1; - */ - public Builder setEnabled(boolean value) { - bitField0_ |= 0x00000001; - enabled_ = value; - onChanged(); - return this; - } - /** - * optional bool enabled = 1; - */ - public Builder clearEnabled() { - bitField0_ = (bitField0_ & ~0x00000001); - enabled_ = false; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:hbase.pb.SwitchState) - } - - static { - defaultInstance = new SwitchState(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:hbase.pb.SwitchState) - } - private static com.google.protobuf.Descriptors.Descriptor internal_static_hbase_pb_MetaRegionServer_descriptor; private static @@ -8690,11 +8246,6 @@ public final class ZooKeeperProtos { private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hbase_pb_TableLock_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_hbase_pb_SwitchState_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_hbase_pb_SwitchState_fieldAccessorTable; public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { @@ -8735,10 +8286,9 @@ public final class ZooKeeperProtos { "\022\'\n\ntable_name\030\001 \001(\0132\023.hbase.pb.TableNam" + "e\022(\n\nlock_owner\030\002 \001(\0132\024.hbase.pb.ServerN", "ame\022\021\n\tthread_id\030\003 \001(\003\022\021\n\tis_shared\030\004 \001(" + - "\010\022\017\n\007purpose\030\005 \001(\t\022\023\n\013create_time\030\006 \001(\003\"" + - "\036\n\013SwitchState\022\017\n\007enabled\030\001 \001(\010BE\n*org.a" + - "pache.hadoop.hbase.protobuf.generatedB\017Z" + - "ooKeeperProtosH\001\210\001\001\240\001\001" + "\010\022\017\n\007purpose\030\005 \001(\t\022\023\n\013create_time\030\006 \001(\003B" + + "E\n*org.apache.hadoop.hbase.protobuf.gene" + + "ratedB\017ZooKeeperProtosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -8805,12 +8355,6 @@ public final class ZooKeeperProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_TableLock_descriptor, new java.lang.String[] { "TableName", "LockOwner", "ThreadId", "IsShared", "Purpose", "CreateTime", }); - internal_static_hbase_pb_SwitchState_descriptor = - getDescriptor().getMessageTypes().get(10); - internal_static_hbase_pb_SwitchState_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - 
internal_static_hbase_pb_SwitchState_descriptor, - new java.lang.String[] { "Enabled", }); return null; } }; http://git-wip-us.apache.org/repos/asf/hbase/blob/bf4fcc30/hbase-protocol/src/main/protobuf/Master.proto ---------------------------------------------------------------------- diff --git a/hbase-protocol/src/main/protobuf/Master.proto b/hbase-protocol/src/main/protobuf/Master.proto index 79bb862..aa31a5e 100644 --- a/hbase-protocol/src/main/protobuf/Master.proto +++ b/hbase-protocol/src/main/protobuf/Master.proto @@ -279,29 +279,6 @@ message IsBalancerEnabledResponse { required bool enabled = 1; } -enum MasterSwitchType { - SPLIT = 0; - MERGE = 1; -} - -message SetSplitOrMergeEnabledRequest { - required bool enabled = 1; - optional bool synchronous = 2; - repeated MasterSwitchType switch_types = 3; -} - -message SetSplitOrMergeEnabledResponse { - repeated bool prev_value = 1; -} - -message IsSplitOrMergeEnabledRequest { - required MasterSwitchType switch_type = 1; -} - -message IsSplitOrMergeEnabledResponse { - required bool enabled = 1; -} - message NormalizeRequest { } @@ -656,19 +633,6 @@ service MasterService { returns(IsBalancerEnabledResponse); /** - * Turn the split or merge switch on or off. - * If synchronous is true, it waits until current operation call, if outstanding, to return. - */ - rpc SetSplitOrMergeEnabled(SetSplitOrMergeEnabledRequest) - returns(SetSplitOrMergeEnabledResponse); - - /** - * Query whether the split or merge switch is on/off. - */ - rpc IsSplitOrMergeEnabled(IsSplitOrMergeEnabledRequest) - returns(IsSplitOrMergeEnabledResponse); - - /** * Run region normalizer. Can NOT run for various reasons. Check logs. */ rpc Normalize(NormalizeRequest) http://git-wip-us.apache.org/repos/asf/hbase/blob/bf4fcc30/hbase-protocol/src/main/protobuf/ZooKeeper.proto ---------------------------------------------------------------------- diff --git a/hbase-protocol/src/main/protobuf/ZooKeeper.proto b/hbase-protocol/src/main/protobuf/ZooKeeper.proto index 4963c09..54652af 100644 --- a/hbase-protocol/src/main/protobuf/ZooKeeper.proto +++ b/hbase-protocol/src/main/protobuf/ZooKeeper.proto @@ -153,10 +153,3 @@ message TableLock { optional string purpose = 5; optional int64 create_time = 6; } - -/** - * State of the switch. 
- */ -message SwitchState { - optional bool enabled = 1; -} \ No newline at end of file http://git-wip-us.apache.org/repos/asf/hbase/blob/bf4fcc30/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java index 1110db3..53a080e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java @@ -62,7 +62,6 @@ import org.apache.hadoop.hbase.RegionStateListener; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotFoundException; -import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.TableState; @@ -2354,11 +2353,6 @@ public class AssignmentManager { return hri.getShortNameToLog() + " is not opening on " + serverName; } - if (!((HMaster)server).getSplitOrMergeTracker().isSplitOrMergeEnabled( - Admin.MasterSwitchType.SPLIT)) { - return "split switch is off!"; - } - // Just return in case of retrying if (current.isSplitting()) { return null; @@ -2517,10 +2511,6 @@ public class AssignmentManager { return "Merging daughter region already exists, p=" + current; } - if (!((HMaster)server).getSplitOrMergeTracker().isSplitOrMergeEnabled( - Admin.MasterSwitchType.MERGE)) { - return "merge switch is off!"; - } // Just return in case of retrying if (current != null) { return null; http://git-wip-us.apache.org/repos/asf/hbase/blob/bf4fcc30/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 6806c2d..5d8c325 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -76,7 +76,6 @@ import org.apache.hadoop.hbase.TableNotDisabledException; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.UnknownRegionException; import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.TableState; @@ -156,7 +155,6 @@ import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; import org.apache.hadoop.hbase.zookeeper.RegionNormalizerTracker; import org.apache.hadoop.hbase.zookeeper.RegionServerTracker; -import org.apache.hadoop.hbase.zookeeper.SplitOrMergeTrackerManager; import org.apache.hadoop.hbase.zookeeper.ZKClusterId; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; @@ -255,9 +253,6 @@ public class HMaster extends HRegionServer implements MasterServices { // Tracker for load balancer state LoadBalancerTracker loadBalancerTracker; - // Tracker for split and merge state - SplitOrMergeTrackerManager splitOrMergeTracker; - // Tracker for region 
normalizer state private RegionNormalizerTracker regionNormalizerTracker; @@ -583,13 +578,8 @@ public class HMaster extends HRegionServer implements MasterServices { this.normalizer.setMasterServices(this); this.loadBalancerTracker = new LoadBalancerTracker(zooKeeper, this); this.loadBalancerTracker.start(); - this.regionNormalizerTracker = new RegionNormalizerTracker(zooKeeper, this); this.regionNormalizerTracker.start(); - - this.splitOrMergeTracker = new SplitOrMergeTrackerManager(zooKeeper, conf, this); - this.splitOrMergeTracker.start(); - this.assignmentManager = new AssignmentManager(this, serverManager, this.balancer, this.service, this.metricsMaster, this.tableLockManager, tableStateManager); @@ -2793,20 +2783,6 @@ public class HMaster extends HRegionServer implements MasterServices { return null == regionNormalizerTracker? false: regionNormalizerTracker.isNormalizerOn(); } - - /** - * Queries the state of the {@link SplitOrMergeTrackerManager}. If it is not initialized, - * false is returned. If switchType is illegal, false will return. - * @param switchType see {@link org.apache.hadoop.hbase.client.Admin.MasterSwitchType} - * @return The state of the switch - */ - public boolean isSplitOrMergeEnabled(Admin.MasterSwitchType switchType) { - if (null == splitOrMergeTracker) { - return false; - } - return splitOrMergeTracker.isSplitOrMergeEnabled(switchType); - } - /** * Fetch the configured {@link LoadBalancer} class name. If none is set, a default is returned. * @@ -2823,8 +2799,4 @@ public class HMaster extends HRegionServer implements MasterServices { public RegionNormalizerTracker getRegionNormalizerTracker() { return regionNormalizerTracker; } - - public SplitOrMergeTrackerManager getSplitOrMergeTracker() { - return splitOrMergeTracker; - } } http://git-wip-us.apache.org/repos/asf/hbase/blob/bf4fcc30/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index 1c770d1..1dd4c14 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -41,7 +41,6 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.UnknownRegionException; import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.errorhandling.ForeignException; import org.apache.hadoop.hbase.exceptions.MergeRegionException; @@ -1507,35 +1506,6 @@ public class MasterRpcServices extends RSRpcServices } @Override - public SetSplitOrMergeEnabledResponse setSplitOrMergeEnabled(RpcController controller, - SetSplitOrMergeEnabledRequest request) throws ServiceException { - SetSplitOrMergeEnabledResponse.Builder response = SetSplitOrMergeEnabledResponse.newBuilder(); - try { - master.checkInitialized(); - for (MasterSwitchType masterSwitchType : request.getSwitchTypesList()) { - Admin.MasterSwitchType switchType = convert(masterSwitchType); - boolean oldValue = master.isSplitOrMergeEnabled(switchType); - boolean newValue = request.getEnabled(); - master.getSplitOrMergeTracker().setSplitOrMergeEnabled(newValue, switchType); - 
response.addPrevValue(oldValue); - } - } catch (IOException e) { - throw new ServiceException(e); - } catch (KeeperException e) { - throw new ServiceException(e); - } - return response.build(); - } - - @Override - public IsSplitOrMergeEnabledResponse isSplitOrMergeEnabled(RpcController controller, - IsSplitOrMergeEnabledRequest request) throws ServiceException { - IsSplitOrMergeEnabledResponse.Builder response = IsSplitOrMergeEnabledResponse.newBuilder(); - response.setEnabled(master.isSplitOrMergeEnabled(convert(request.getSwitchType()))); - return response.build(); - } - - @Override public NormalizeResponse normalize(RpcController controller, NormalizeRequest request) throws ServiceException { try { @@ -1604,16 +1574,4 @@ public class MasterRpcServices extends RSRpcServices } return response.build(); } - - private Admin.MasterSwitchType convert(MasterSwitchType switchType) { - switch (switchType) { - case SPLIT: - return Admin.MasterSwitchType.SPLIT; - case MERGE: - return Admin.MasterSwitchType.MERGE; - default: - break; - } - return null; - } } http://git-wip-us.apache.org/repos/asf/hbase/blob/bf4fcc30/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java index 93287ad..a9113ec 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java @@ -235,7 +235,6 @@ public class HBaseFsck extends Configured implements Closeable { private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older private static boolean forceExclusive = false; // only this hbck can modify HBase private static boolean disableBalancer = false; // disable load balancer to keep regions stable - private static boolean disableSplitAndMerge = false; // disable split and merge private boolean fixAssignments = false; // fix assignment errors? private boolean fixMeta = false; // fix meta errors? private boolean checkHdfs = true; // load and check fs consistency? @@ -684,11 +683,6 @@ public class HBaseFsck extends Configured implements Closeable { if (shouldDisableBalancer()) { oldBalancer = admin.setBalancerRunning(false, true); } - boolean[] oldSplitAndMerge = null; - if (shouldDisableSplitAndMerge()) { - oldSplitAndMerge = admin.setSplitOrMergeEnabled(false, false, - Admin.MasterSwitchType.SPLIT, Admin.MasterSwitchType.MERGE); - } try { onlineConsistencyRepair(); @@ -700,17 +694,6 @@ public class HBaseFsck extends Configured implements Closeable { if (shouldDisableBalancer() && oldBalancer) { admin.setBalancerRunning(oldBalancer, false); } - - if (shouldDisableSplitAndMerge()) { - if (oldSplitAndMerge != null) { - if (oldSplitAndMerge[0]) { - admin.setSplitOrMergeEnabled(true, false, Admin.MasterSwitchType.SPLIT); - } - if (oldSplitAndMerge[1]) { - admin.setSplitOrMergeEnabled(true, false, Admin.MasterSwitchType.MERGE); - } - } - } } if (checkRegionBoundaries) { @@ -4201,13 +4184,6 @@ public class HBaseFsck extends Configured implements Closeable { } /** - * Disable the split and merge - */ - public static void setDisableSplitAndMerge() { - disableSplitAndMerge = true; - } - - /** * The balancer should be disabled if we are modifying HBase. * It can be disabled if you want to prevent region movement from causing * false positives. 
@@ -4217,15 +4193,6 @@ public class HBaseFsck extends Configured implements Closeable { } /** - * The split and merge should be disabled if we are modifying HBase. - * It can be disabled if you want to prevent region movement from causing - * false positives. - */ - public boolean shouldDisableSplitAndMerge() { - return fixAny || disableSplitAndMerge; - } - - /** * Set summary mode. * Print only summary of the tables and status (OK or INCONSISTENT) */ @@ -4584,8 +4551,6 @@ public class HBaseFsck extends Configured implements Closeable { setForceExclusive(); } else if (cmd.equals("-disableBalancer")) { setDisableBalancer(); - } else if (cmd.equals("-disableSplitAndMerge")) { - setDisableSplitAndMerge(); } else if (cmd.equals("-timelag")) { if (i == args.length - 1) { errors.reportError(ERROR_CODE.WRONG_USAGE, "HBaseFsck: -timelag needs a value."); http://git-wip-us.apache.org/repos/asf/hbase/blob/bf4fcc30/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/SplitOrMergeTrackerManager.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/SplitOrMergeTrackerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/SplitOrMergeTrackerManager.java deleted file mode 100644 index 1495dd1..0000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/SplitOrMergeTrackerManager.java +++ /dev/null @@ -1,151 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.zookeeper; - -import java.io.IOException; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.Abortable; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.client.Admin; -import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.zookeeper.KeeperException; - - -/** - * Tracks the switch of split and merge states in ZK - * - */ -@InterfaceAudience.Private -public class SplitOrMergeTrackerManager { - - private String splitZnode; - private String mergeZnode; - - private SwitchStateTracker splitStateTracker; - private SwitchStateTracker mergeStateTracker; - - public SplitOrMergeTrackerManager(ZooKeeperWatcher watcher, Configuration conf, - Abortable abortable) { - try { - if (ZKUtil.checkExists(watcher, watcher.getSwitchZNode()) < 0) { - ZKUtil.createAndFailSilent(watcher, watcher.getSwitchZNode()); - } - } catch (KeeperException e) { - throw new RuntimeException(e); - } - splitZnode = ZKUtil.joinZNode(watcher.getSwitchZNode(), - conf.get("zookeeper.znode.switch.split", "split")); - mergeZnode = ZKUtil.joinZNode(watcher.getSwitchZNode(), - conf.get("zookeeper.znode.switch.merge", "merge")); - splitStateTracker = new SwitchStateTracker(watcher, splitZnode, abortable); - mergeStateTracker = new SwitchStateTracker(watcher, mergeZnode, abortable); - } - - public void start() { - splitStateTracker.start(); - mergeStateTracker.start(); - } - - public boolean isSplitOrMergeEnabled(Admin.MasterSwitchType switchType) { - switch (switchType) { - case SPLIT: - return splitStateTracker.isSwitchEnabled(); - case MERGE: - return mergeStateTracker.isSwitchEnabled(); - default: - break; - } - return false; - } - - public void setSplitOrMergeEnabled(boolean enabled, Admin.MasterSwitchType switchType) - throws KeeperException { - switch (switchType) { - case SPLIT: - splitStateTracker.setSwitchEnabled(enabled); - break; - case MERGE: - mergeStateTracker.setSwitchEnabled(enabled); - break; - default: - break; - } - } - - private static class SwitchStateTracker extends ZooKeeperNodeTracker { - - public SwitchStateTracker(ZooKeeperWatcher watcher, String node, Abortable abortable) { - super(watcher, node, abortable); - } - - /** - * Return true if the switch is on, false otherwise - */ - public boolean isSwitchEnabled() { - byte [] upData = super.getData(false); - try { - // if data in ZK is null, use default of on. - return upData == null || parseFrom(upData).getEnabled(); - } catch (DeserializationException dex) { - LOG.error("ZK state for LoadBalancer could not be parsed " + Bytes.toStringBinary(upData)); - // return false to be safe. - return false; - } - } - - /** - * Set the switch on/off - * @param enabled switch enabled or not? 
- * @throws KeeperException keepException will be thrown out - */ - public void setSwitchEnabled(boolean enabled) throws KeeperException { - byte [] upData = toByteArray(enabled); - try { - ZKUtil.setData(watcher, node, upData); - } catch(KeeperException.NoNodeException nne) { - ZKUtil.createAndWatch(watcher, node, upData); - } - super.nodeDataChanged(node); - } - - private byte [] toByteArray(boolean enabled) { - SwitchState.Builder builder = SwitchState.newBuilder(); - builder.setEnabled(enabled); - return ProtobufUtil.prependPBMagic(builder.build().toByteArray()); - } - - private SwitchState parseFrom(byte [] bytes) - throws DeserializationException { - ProtobufUtil.expectPBMagicPrefix(bytes); - SwitchState.Builder builder = SwitchState.newBuilder(); - try { - int magicLen = ProtobufUtil.lengthOfPBMagic(); - ProtobufUtil.mergeFrom(builder, bytes, magicLen, bytes.length - magicLen); - } catch (IOException e) { - throw new DeserializationException(e); - } - return builder.build(); - } - } - - -} http://git-wip-us.apache.org/repos/asf/hbase/blob/bf4fcc30/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSplitOrMergeStatus.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSplitOrMergeStatus.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSplitOrMergeStatus.java deleted file mode 100644 index 6405a14..0000000 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSplitOrMergeStatus.java +++ /dev/null @@ -1,198 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.client; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HRegionLocation; -import org.apache.hadoop.hbase.MiniHBaseCluster; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.regionserver.HRegion; -import org.apache.hadoop.hbase.regionserver.Region; -import org.apache.hadoop.hbase.testclassification.ClientTests; -import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.JVMClusterUtil; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -import java.io.IOException; -import java.util.List; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - -@Category({MediumTests.class, ClientTests.class}) -public class TestSplitOrMergeStatus { - - private static final Log LOG = LogFactory.getLog(TestSplitOrMergeStatus.class); - private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); - private static byte [] FAMILY = Bytes.toBytes("testFamily"); - - /** - * @throws java.lang.Exception - */ - @BeforeClass - public static void setUpBeforeClass() throws Exception { - TEST_UTIL.startMiniCluster(2); - } - - /** - * @throws java.lang.Exception - */ - @AfterClass - public static void tearDownAfterClass() throws Exception { - TEST_UTIL.shutdownMiniCluster(); - } - - @Test - public void testSplitSwitch() throws Exception { - TableName name = TableName.valueOf("testSplitSwitch"); - Table t = TEST_UTIL.createTable(name, FAMILY); - TEST_UTIL.loadTable(t, FAMILY, false); - - RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(t.getName()); - int orignalCount = locator.getAllRegionLocations().size(); - - Admin admin = TEST_UTIL.getAdmin(); - initSwitchStatus(admin); - boolean[] results = admin.setSplitOrMergeEnabled(false, false, Admin.MasterSwitchType.SPLIT); - assertEquals(results.length, 1); - assertTrue(results[0]); - admin.split(t.getName()); - int count = waitOnSplitOrMerge(t).size(); - assertTrue(orignalCount == count); - - results = admin.setSplitOrMergeEnabled(true, false, Admin.MasterSwitchType.SPLIT); - assertEquals(results.length, 1); - assertFalse(results[0]); - admin.split(t.getName()); - count = waitOnSplitOrMerge(t).size(); - assertTrue(orignalCount regions = admin.getTableRegions(t.getName()); - assertTrue(regions.size() > 1); - admin.mergeRegions(regions.get(0).getEncodedNameAsBytes(), - regions.get(1).getEncodedNameAsBytes(), true); - int count = waitOnSplitOrMerge(t).size(); - assertTrue(orignalCount == count); - - waitForMergable(admin, name); - results = admin.setSplitOrMergeEnabled(true, false, Admin.MasterSwitchType.MERGE); - assertEquals(results.length, 1); - assertFalse(results[0]); - admin.mergeRegions(regions.get(0).getEncodedNameAsBytes(), - regions.get(1).getEncodedNameAsBytes(), true); - count = waitOnSplitOrMerge(t).size(); - assertTrue(orignalCount>count); - admin.close(); - } - - @Test - public void testMultiSwitches() throws IOException { - Admin admin = TEST_UTIL.getAdmin(); - boolean[] switches = admin.setSplitOrMergeEnabled(false, false, - Admin.MasterSwitchType.SPLIT, Admin.MasterSwitchType.MERGE); - for (boolean s : switches){ - assertTrue(s); - } - 
assertFalse(admin.isSplitOrMergeEnabled(Admin.MasterSwitchType.SPLIT)); - assertFalse(admin.isSplitOrMergeEnabled(Admin.MasterSwitchType.MERGE)); - admin.close(); - } - - private void initSwitchStatus(Admin admin) throws IOException { - if (!admin.isSplitOrMergeEnabled(Admin.MasterSwitchType.SPLIT)) { - admin.setSplitOrMergeEnabled(true, false, Admin.MasterSwitchType.SPLIT); - } - if (!admin.isSplitOrMergeEnabled(Admin.MasterSwitchType.MERGE)) { - admin.setSplitOrMergeEnabled(true, false, Admin.MasterSwitchType.MERGE); - } - assertTrue(admin.isSplitOrMergeEnabled(Admin.MasterSwitchType.SPLIT)); - assertTrue(admin.isSplitOrMergeEnabled(Admin.MasterSwitchType.MERGE)); - } - - private void waitForMergable(Admin admin, TableName t) throws InterruptedException, IOException { - // Wait for the Regions to be mergeable - MiniHBaseCluster miniCluster = TEST_UTIL.getMiniHBaseCluster(); - int mergeable = 0; - while (mergeable < 2) { - Thread.sleep(100); - admin.majorCompact(t); - mergeable = 0; - for (JVMClusterUtil.RegionServerThread regionThread: miniCluster.getRegionServerThreads()) { - for (Region region: regionThread.getRegionServer().getOnlineRegions(t)) { - mergeable += ((HRegion)region).isMergeable() ? 1 : 0; - } - } - } - } - - /* - * Wait on table split. May return because we waited long enough on the split - * and it didn't happen. Caller should check. - * @param t - * @return Map of table regions; caller needs to check table actually split. - */ - private List waitOnSplitOrMerge(final Table t) - throws IOException { - try (RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(t.getName())) { - List regions = locator.getAllRegionLocations(); - int originalCount = regions.size(); - for (int i = 0; i < TEST_UTIL.getConfiguration().getInt("hbase.test.retries", 10); i++) { - Thread.currentThread(); - try { - Thread.sleep(1000); - } catch (InterruptedException e) { - e.printStackTrace(); - } - regions = locator.getAllRegionLocations(); - if (regions.size() != originalCount) - break; - } - return regions; - } - } - -} http://git-wip-us.apache.org/repos/asf/hbase/blob/bf4fcc30/hbase-shell/src/main/ruby/hbase/admin.rb ---------------------------------------------------------------------- diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb index 40c3711..82f0700 100644 --- a/hbase-shell/src/main/ruby/hbase/admin.rb +++ b/hbase-shell/src/main/ruby/hbase/admin.rb @@ -132,38 +132,6 @@ module Hbase end end - #---------------------------------------------------------------------------------------------- - # Enable/disable one split or merge switch - # Returns previous switch setting. - def splitormerge_switch(type, enabled) - switch_type = nil - if type == 'SPLIT' - switch_type = org.apache.hadoop.hbase.client.Admin::MasterSwitchType::SPLIT - elsif type == 'MERGE' - switch_type = org.apache.hadoop.hbase.client.Admin::MasterSwitchType::MERGE - else - raise ArgumentError, 'only SPLIT or MERGE accepted for type!' - end - @admin.setSplitOrMergeEnabled( - java.lang.Boolean.valueOf(enabled), java.lang.Boolean.valueOf(false), - switch_type)[0] - end - - #---------------------------------------------------------------------------------------------- - # Query the current state of the split or merge switch. - # Returns the switch's state (true is enabled). 
- def splitormerge_enabled(type) - switch_type = nil - if type == 'SPLIT' - switch_type = org.apache.hadoop.hbase.client.Admin::MasterSwitchType::SPLIT - elsif type == 'MERGE' - switch_type = org.apache.hadoop.hbase.client.Admin::MasterSwitchType::MERGE - else - raise ArgumentError, 'only SPLIT or MERGE accepted for type!' - end - @admin.isSplitOrMergeEnabled(switch_type) - end - def locate_region(table_name, row_key) locator = @connection.getRegionLocator(TableName.valueOf(table_name)) begin http://git-wip-us.apache.org/repos/asf/hbase/blob/bf4fcc30/hbase-shell/src/main/ruby/shell.rb ---------------------------------------------------------------------- diff --git a/hbase-shell/src/main/ruby/shell.rb b/hbase-shell/src/main/ruby/shell.rb index 4144b91..0ecd3d7 100644 --- a/hbase-shell/src/main/ruby/shell.rb +++ b/hbase-shell/src/main/ruby/shell.rb @@ -333,8 +333,6 @@ Shell.load_command_group( catalogjanitor_enabled compact_rs trace - splitormerge_switch - splitormerge_enabled ], # TODO remove older hlog_roll command :aliases => { http://git-wip-us.apache.org/repos/asf/hbase/blob/bf4fcc30/hbase-shell/src/main/ruby/shell/commands/splitormerge_enabled.rb ---------------------------------------------------------------------- diff --git a/hbase-shell/src/main/ruby/shell/commands/splitormerge_enabled.rb b/hbase-shell/src/main/ruby/shell/commands/splitormerge_enabled.rb deleted file mode 100644 index 7da7564..0000000 --- a/hbase-shell/src/main/ruby/shell/commands/splitormerge_enabled.rb +++ /dev/null @@ -1,41 +0,0 @@ -#!/usr/bin/env hbase-jruby -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with this -# work for additional information regarding copyright ownership. The ASF -# licenses this file to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# Prints the current split or merge status -module Shell - module Commands - # Command for check split or merge switch status - class SplitormergeEnabled < Command - def help - print <<-EOF -Query the switch's state. You can set switch type, 'SPLIT' or 'MERGE' -Examples: - - hbase> splitormerge_enabled 'SPLIT' -EOF - end - - def command(switch_type) - format_simple_command do - formatter.row( - [admin.splitormerge_enabled(switch_type) ? 'true' : 'false'] - ) - end - end - end - end -end http://git-wip-us.apache.org/repos/asf/hbase/blob/bf4fcc30/hbase-shell/src/main/ruby/shell/commands/splitormerge_switch.rb ---------------------------------------------------------------------- diff --git a/hbase-shell/src/main/ruby/shell/commands/splitormerge_switch.rb b/hbase-shell/src/main/ruby/shell/commands/splitormerge_switch.rb deleted file mode 100644 index f4c2858..0000000 --- a/hbase-shell/src/main/ruby/shell/commands/splitormerge_switch.rb +++ /dev/null @@ -1,43 +0,0 @@ -# -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. 
See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -module Shell - module Commands - # Command for set switch for split and merge - class SplitormergeSwitch < Command - def help - print <<-EOF -Enable/Disable one switch. You can set switch type 'SPLIT' or 'MERGE'. Returns previous split state. -Examples: - - hbase> splitormerge_switch 'SPLIT', true - hbase> splitormerge_switch 'SPLIT', false -EOF - end - - def command(switch_type, enabled) - format_simple_command do - formatter.row( - [admin.splitormerge_switch(switch_type, enabled) ? 'true' : 'false'] - ) - end - end - end - end -end