Return-Path: X-Original-To: apmail-hbase-commits-archive@www.apache.org Delivered-To: apmail-hbase-commits-archive@www.apache.org Received: from mail.apache.org (hermes.apache.org [140.211.11.3]) by minotaur.apache.org (Postfix) with SMTP id BDBBA11AE2 for ; Wed, 6 Aug 2014 23:22:47 +0000 (UTC) Received: (qmail 79784 invoked by uid 500); 6 Aug 2014 23:22:46 -0000 Delivered-To: apmail-hbase-commits-archive@hbase.apache.org Received: (qmail 79667 invoked by uid 500); 6 Aug 2014 23:22:46 -0000 Mailing-List: contact commits-help@hbase.apache.org; run by ezmlm Precedence: bulk List-Help: List-Unsubscribe: List-Post: List-Id: Reply-To: dev@hbase.apache.org Delivered-To: mailing list commits@hbase.apache.org Received: (qmail 79283 invoked by uid 99); 6 Aug 2014 23:22:46 -0000 Received: from tyr.zones.apache.org (HELO tyr.zones.apache.org) (140.211.11.114) by apache.org (qpsmtpd/0.29) with ESMTP; Wed, 06 Aug 2014 23:22:46 +0000 Received: by tyr.zones.apache.org (Postfix, from userid 65534) id 2A2809C15B2; Wed, 6 Aug 2014 23:22:46 +0000 (UTC) Content-Type: text/plain; charset="us-ascii" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit From: jxiang@apache.org To: commits@hbase.apache.org Date: Wed, 06 Aug 2014 23:22:53 -0000 Message-Id: <77dac6423f1d4dae8fa4e671d0c939e7@git.apache.org> In-Reply-To: References: X-Mailer: ASF-Git Admin Mailer Subject: [09/10] HBASE-11611 Clean up ZK-based region assignment http://git-wip-us.apache.org/repos/asf/hbase/blob/17dff681/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java ---------------------------------------------------------------------- diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java index a1caf87..10274b4 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java @@ -2117,1093 +2117,6 @@ public final class ZooKeeperProtos { // @@protoc_insertion_point(class_scope:ClusterUp) } - public interface RegionTransitionOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required uint32 event_type_code = 1; - /** - * required uint32 event_type_code = 1; - * - *
-     * Code for EventType gotten by doing o.a.h.h.EventHandler.EventType.getCode()
-     * </pre>
-     */
-    boolean hasEventTypeCode();
-    /**
-     * <code>required uint32 event_type_code = 1;</code>
-     *
-     * <pre>
-     * Code for EventType gotten by doing o.a.h.h.EventHandler.EventType.getCode()
-     * </pre>
-     */
-    int getEventTypeCode();
-
-    // required bytes region_name = 2;
-    /**
-     * <code>required bytes region_name = 2;</code>
-     *
-     * <pre>
-     * Full regionname in bytes
-     * </pre>
-     */
-    boolean hasRegionName();
-    /**
-     * <code>required bytes region_name = 2;</code>
-     *
-     * <pre>
-     * Full regionname in bytes
-     * </pre>
-     */
-    com.google.protobuf.ByteString getRegionName();
-
-    // required uint64 create_time = 3;
-    /**
-     * <code>required uint64 create_time = 3;</code>
-     */
-    boolean hasCreateTime();
-    /**
-     * <code>required uint64 create_time = 3;</code>
-     */
-    long getCreateTime();
-
-    // required .ServerName server_name = 4;
-    /**
-     * <code>required .ServerName server_name = 4;</code>
-     *
-     * <pre>
-     * The region server where the transition will happen or is happening
-     * </pre>
-     */
-    boolean hasServerName();
-    /**
-     * <code>required .ServerName server_name = 4;</code>
-     *
-     * <pre>
-     * The region server where the transition will happen or is happening
-     * </pre>
-     */
-    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName();
-    /**
-     * <code>required .ServerName server_name = 4;</code>
-     *
-     * <pre>
-     * The region server where the transition will happen or is happening
-     * </pre>
-     */
-    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder();
-
-    // optional bytes payload = 5;
-    /**
-     * <code>optional bytes payload = 5;</code>
-     */
-    boolean hasPayload();
-    /**
-     * <code>optional bytes payload = 5;</code>
-     */
-    com.google.protobuf.ByteString getPayload();
-  }
-  /**
-   * Protobuf type {@code RegionTransition}
-   *
-   * <pre>
-   **
-   * What we write under unassigned up in zookeeper as a region moves through
-   * open/close, etc., regions.  Details a region in transition.
-   * </pre>
- */ - public static final class RegionTransition extends - com.google.protobuf.GeneratedMessage - implements RegionTransitionOrBuilder { - // Use RegionTransition.newBuilder() to construct. - private RegionTransition(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private RegionTransition(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final RegionTransition defaultInstance; - public static RegionTransition getDefaultInstance() { - return defaultInstance; - } - - public RegionTransition getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private RegionTransition( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 8: { - bitField0_ |= 0x00000001; - eventTypeCode_ = input.readUInt32(); - break; - } - case 18: { - bitField0_ |= 0x00000002; - regionName_ = input.readBytes(); - break; - } - case 24: { - bitField0_ |= 0x00000004; - createTime_ = input.readUInt64(); - break; - } - case 34: { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder subBuilder = null; - if (((bitField0_ & 0x00000008) == 0x00000008)) { - subBuilder = serverName_.toBuilder(); - } - serverName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(serverName_); - serverName_ = subBuilder.buildPartial(); - } - bitField0_ |= 0x00000008; - break; - } - case 42: { - bitField0_ |= 0x00000010; - payload_ = input.readBytes(); - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_RegionTransition_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_RegionTransition_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public RegionTransition parsePartialFrom( - com.google.protobuf.CodedInputStream input, - 
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-          throws com.google.protobuf.InvalidProtocolBufferException {
-        return new RegionTransition(input, extensionRegistry);
-      }
-    };
-
-    @java.lang.Override
-    public com.google.protobuf.Parser<RegionTransition> getParserForType() {
-      return PARSER;
-    }
-
-    private int bitField0_;
-    // required uint32 event_type_code = 1;
-    public static final int EVENT_TYPE_CODE_FIELD_NUMBER = 1;
-    private int eventTypeCode_;
-    /**
-     * <code>required uint32 event_type_code = 1;</code>
-     *
-     * <pre>
-     * Code for EventType gotten by doing o.a.h.h.EventHandler.EventType.getCode()
-     * </pre>
-     */
-    public boolean hasEventTypeCode() {
-      return ((bitField0_ & 0x00000001) == 0x00000001);
-    }
-    /**
-     * <code>required uint32 event_type_code = 1;</code>
-     *
-     * <pre>
-     * Code for EventType gotten by doing o.a.h.h.EventHandler.EventType.getCode()
-     * </pre>
-     */
-    public int getEventTypeCode() {
-      return eventTypeCode_;
-    }
-
-    // required bytes region_name = 2;
-    public static final int REGION_NAME_FIELD_NUMBER = 2;
-    private com.google.protobuf.ByteString regionName_;
-    /**
-     * <code>required bytes region_name = 2;</code>
-     *
-     * <pre>
-     * Full regionname in bytes
-     * </pre>
-     */
-    public boolean hasRegionName() {
-      return ((bitField0_ & 0x00000002) == 0x00000002);
-    }
-    /**
-     * <code>required bytes region_name = 2;</code>
-     *
-     * <pre>
-     * Full regionname in bytes
-     * </pre>
-     */
-    public com.google.protobuf.ByteString getRegionName() {
-      return regionName_;
-    }
-
-    // required uint64 create_time = 3;
-    public static final int CREATE_TIME_FIELD_NUMBER = 3;
-    private long createTime_;
-    /**
-     * <code>required uint64 create_time = 3;</code>
-     */
-    public boolean hasCreateTime() {
-      return ((bitField0_ & 0x00000004) == 0x00000004);
-    }
-    /**
-     * <code>required uint64 create_time = 3;</code>
-     */
-    public long getCreateTime() {
-      return createTime_;
-    }
-
-    // required .ServerName server_name = 4;
-    public static final int SERVER_NAME_FIELD_NUMBER = 4;
-    private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName serverName_;
-    /**
-     * <code>required .ServerName server_name = 4;</code>
-     *
-     * <pre>
-     * The region server where the transition will happen or is happening
-     * </pre>
-     */
-    public boolean hasServerName() {
-      return ((bitField0_ & 0x00000008) == 0x00000008);
-    }
-    /**
-     * <code>required .ServerName server_name = 4;</code>
-     *
-     * <pre>
-     * The region server where the transition will happen or is happening
-     * </pre>
-     */
-    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName() {
-      return serverName_;
-    }
-    /**
-     * <code>required .ServerName server_name = 4;</code>
-     *
-     * <pre>
-     * The region server where the transition will happen or is happening
-     * </pre>
- */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder() { - return serverName_; - } - - // optional bytes payload = 5; - public static final int PAYLOAD_FIELD_NUMBER = 5; - private com.google.protobuf.ByteString payload_; - /** - * optional bytes payload = 5; - */ - public boolean hasPayload() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - /** - * optional bytes payload = 5; - */ - public com.google.protobuf.ByteString getPayload() { - return payload_; - } - - private void initFields() { - eventTypeCode_ = 0; - regionName_ = com.google.protobuf.ByteString.EMPTY; - createTime_ = 0L; - serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); - payload_ = com.google.protobuf.ByteString.EMPTY; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasEventTypeCode()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasRegionName()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasCreateTime()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasServerName()) { - memoizedIsInitialized = 0; - return false; - } - if (!getServerName().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeUInt32(1, eventTypeCode_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBytes(2, regionName_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeUInt64(3, createTime_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeMessage(4, serverName_); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - output.writeBytes(5, payload_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt32Size(1, eventTypeCode_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(2, regionName_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(3, createTime_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(4, serverName_); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(5, payload_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition)) { - return super.equals(obj); - } - 
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition) obj; - - boolean result = true; - result = result && (hasEventTypeCode() == other.hasEventTypeCode()); - if (hasEventTypeCode()) { - result = result && (getEventTypeCode() - == other.getEventTypeCode()); - } - result = result && (hasRegionName() == other.hasRegionName()); - if (hasRegionName()) { - result = result && getRegionName() - .equals(other.getRegionName()); - } - result = result && (hasCreateTime() == other.hasCreateTime()); - if (hasCreateTime()) { - result = result && (getCreateTime() - == other.getCreateTime()); - } - result = result && (hasServerName() == other.hasServerName()); - if (hasServerName()) { - result = result && getServerName() - .equals(other.getServerName()); - } - result = result && (hasPayload() == other.hasPayload()); - if (hasPayload()) { - result = result && getPayload() - .equals(other.getPayload()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - private int memoizedHashCode = 0; - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; - } - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasEventTypeCode()) { - hash = (37 * hash) + EVENT_TYPE_CODE_FIELD_NUMBER; - hash = (53 * hash) + getEventTypeCode(); - } - if (hasRegionName()) { - hash = (37 * hash) + REGION_NAME_FIELD_NUMBER; - hash = (53 * hash) + getRegionName().hashCode(); - } - if (hasCreateTime()) { - hash = (37 * hash) + CREATE_TIME_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getCreateTime()); - } - if (hasServerName()) { - hash = (37 * hash) + SERVER_NAME_FIELD_NUMBER; - hash = (53 * hash) + getServerName().hashCode(); - } - if (hasPayload()) { - hash = (37 * hash) + PAYLOAD_FIELD_NUMBER; - hash = (53 * hash) + getPayload().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - memoizedHashCode = hash; - return hash; - } - - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws 
java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code RegionTransition} - * - *
-     **
-     * What we write under unassigned up in zookeeper as a region moves through
-     * open/close, etc., regions.  Details a region in transition.
-     * </pre>
- */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransitionOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_RegionTransition_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_RegionTransition_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition.Builder.class); - } - - // Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getServerNameFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - eventTypeCode_ = 0; - bitField0_ = (bitField0_ & ~0x00000001); - regionName_ = com.google.protobuf.ByteString.EMPTY; - bitField0_ = (bitField0_ & ~0x00000002); - createTime_ = 0L; - bitField0_ = (bitField0_ & ~0x00000004); - if (serverNameBuilder_ == null) { - serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); - } else { - serverNameBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000008); - payload_ = com.google.protobuf.ByteString.EMPTY; - bitField0_ = (bitField0_ & ~0x00000010); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_RegionTransition_descriptor; - } - - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition.getDefaultInstance(); - } - - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition build() { - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.eventTypeCode_ = eventTypeCode_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.regionName_ = regionName_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - result.createTime_ = createTime_; - if (((from_bitField0_ 
& 0x00000008) == 0x00000008)) { - to_bitField0_ |= 0x00000008; - } - if (serverNameBuilder_ == null) { - result.serverName_ = serverName_; - } else { - result.serverName_ = serverNameBuilder_.build(); - } - if (((from_bitField0_ & 0x00000010) == 0x00000010)) { - to_bitField0_ |= 0x00000010; - } - result.payload_ = payload_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition.getDefaultInstance()) return this; - if (other.hasEventTypeCode()) { - setEventTypeCode(other.getEventTypeCode()); - } - if (other.hasRegionName()) { - setRegionName(other.getRegionName()); - } - if (other.hasCreateTime()) { - setCreateTime(other.getCreateTime()); - } - if (other.hasServerName()) { - mergeServerName(other.getServerName()); - } - if (other.hasPayload()) { - setPayload(other.getPayload()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasEventTypeCode()) { - - return false; - } - if (!hasRegionName()) { - - return false; - } - if (!hasCreateTime()) { - - return false; - } - if (!hasServerName()) { - - return false; - } - if (!getServerName().isInitialized()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // required uint32 event_type_code = 1; - private int eventTypeCode_ ; - /** - * required uint32 event_type_code = 1; - * - *
-       * Code for EventType gotten by doing o.a.h.h.EventHandler.EventType.getCode()
-       * </pre>
-       */
-      public boolean hasEventTypeCode() {
-        return ((bitField0_ & 0x00000001) == 0x00000001);
-      }
-      /**
-       * <code>required uint32 event_type_code = 1;</code>
-       *
-       * <pre>
-       * Code for EventType gotten by doing o.a.h.h.EventHandler.EventType.getCode()
-       * </pre>
-       */
-      public int getEventTypeCode() {
-        return eventTypeCode_;
-      }
-      /**
-       * <code>required uint32 event_type_code = 1;</code>
-       *
-       * <pre>
-       * Code for EventType gotten by doing o.a.h.h.EventHandler.EventType.getCode()
-       * </pre>
-       */
-      public Builder setEventTypeCode(int value) {
-        bitField0_ |= 0x00000001;
-        eventTypeCode_ = value;
-        onChanged();
-        return this;
-      }
-      /**
-       * <code>required uint32 event_type_code = 1;</code>
-       *
-       * <pre>
-       * Code for EventType gotten by doing o.a.h.h.EventHandler.EventType.getCode()
-       * </pre>
-       */
-      public Builder clearEventTypeCode() {
-        bitField0_ = (bitField0_ & ~0x00000001);
-        eventTypeCode_ = 0;
-        onChanged();
-        return this;
-      }
-
-      // required bytes region_name = 2;
-      private com.google.protobuf.ByteString regionName_ = com.google.protobuf.ByteString.EMPTY;
-      /**
-       * <code>required bytes region_name = 2;</code>
-       *
-       * <pre>
-       * Full regionname in bytes
-       * </pre>
-       */
-      public boolean hasRegionName() {
-        return ((bitField0_ & 0x00000002) == 0x00000002);
-      }
-      /**
-       * <code>required bytes region_name = 2;</code>
-       *
-       * <pre>
-       * Full regionname in bytes
-       * </pre>
-       */
-      public com.google.protobuf.ByteString getRegionName() {
-        return regionName_;
-      }
-      /**
-       * <code>required bytes region_name = 2;</code>
-       *
-       * <pre>
-       * Full regionname in bytes
-       * </pre>
-       */
-      public Builder setRegionName(com.google.protobuf.ByteString value) {
-        if (value == null) {
-          throw new NullPointerException();
-        }
-        bitField0_ |= 0x00000002;
-        regionName_ = value;
-        onChanged();
-        return this;
-      }
-      /**
-       * <code>required bytes region_name = 2;</code>
-       *
-       * <pre>
-       * Full regionname in bytes
-       * </pre>
-       */
-      public Builder clearRegionName() {
-        bitField0_ = (bitField0_ & ~0x00000002);
-        regionName_ = getDefaultInstance().getRegionName();
-        onChanged();
-        return this;
-      }
-
-      // required uint64 create_time = 3;
-      private long createTime_ ;
-      /**
-       * <code>required uint64 create_time = 3;</code>
-       */
-      public boolean hasCreateTime() {
-        return ((bitField0_ & 0x00000004) == 0x00000004);
-      }
-      /**
-       * <code>required uint64 create_time = 3;</code>
-       */
-      public long getCreateTime() {
-        return createTime_;
-      }
-      /**
-       * <code>required uint64 create_time = 3;</code>
-       */
-      public Builder setCreateTime(long value) {
-        bitField0_ |= 0x00000004;
-        createTime_ = value;
-        onChanged();
-        return this;
-      }
-      /**
-       * <code>required uint64 create_time = 3;</code>
-       */
-      public Builder clearCreateTime() {
-        bitField0_ = (bitField0_ & ~0x00000004);
-        createTime_ = 0L;
-        onChanged();
-        return this;
-      }
-
-      // required .ServerName server_name = 4;
-      private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
-      private com.google.protobuf.SingleFieldBuilder<
-          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> serverNameBuilder_;
-      /**
-       * <code>required .ServerName server_name = 4;</code>
-       *
-       * <pre>
-       * The region server where the transition will happen or is happening
-       * </pre>
-       */
-      public boolean hasServerName() {
-        return ((bitField0_ & 0x00000008) == 0x00000008);
-      }
-      /**
-       * <code>required .ServerName server_name = 4;</code>
-       *
-       * <pre>
-       * The region server where the transition will happen or is happening
-       * </pre>
-       */
-      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName() {
-        if (serverNameBuilder_ == null) {
-          return serverName_;
-        } else {
-          return serverNameBuilder_.getMessage();
-        }
-      }
-      /**
-       * <code>required .ServerName server_name = 4;</code>
-       *
-       * <pre>
-       * The region server where the transition will happen or is happening
-       * </pre>
-       */
-      public Builder setServerName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
-        if (serverNameBuilder_ == null) {
-          if (value == null) {
-            throw new NullPointerException();
-          }
-          serverName_ = value;
-          onChanged();
-        } else {
-          serverNameBuilder_.setMessage(value);
-        }
-        bitField0_ |= 0x00000008;
-        return this;
-      }
-      /**
-       * <code>required .ServerName server_name = 4;</code>
-       *
-       * <pre>
-       * The region server where the transition will happen or is happening
-       * </pre>
-       */
-      public Builder setServerName(
-          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
-        if (serverNameBuilder_ == null) {
-          serverName_ = builderForValue.build();
-          onChanged();
-        } else {
-          serverNameBuilder_.setMessage(builderForValue.build());
-        }
-        bitField0_ |= 0x00000008;
-        return this;
-      }
-      /**
-       * <code>required .ServerName server_name = 4;</code>
-       *
-       * <pre>
-       * The region server where the transition will happen or is happening
-       * </pre>
-       */
-      public Builder mergeServerName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
-        if (serverNameBuilder_ == null) {
-          if (((bitField0_ & 0x00000008) == 0x00000008) &&
-              serverName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()) {
-            serverName_ =
-              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder(serverName_).mergeFrom(value).buildPartial();
-          } else {
-            serverName_ = value;
-          }
-          onChanged();
-        } else {
-          serverNameBuilder_.mergeFrom(value);
-        }
-        bitField0_ |= 0x00000008;
-        return this;
-      }
-      /**
-       * <code>required .ServerName server_name = 4;</code>
-       *
-       * <pre>
-       * The region server where the transition will happen or is happening
-       * </pre>
-       */
-      public Builder clearServerName() {
-        if (serverNameBuilder_ == null) {
-          serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
-          onChanged();
-        } else {
-          serverNameBuilder_.clear();
-        }
-        bitField0_ = (bitField0_ & ~0x00000008);
-        return this;
-      }
-      /**
-       * <code>required .ServerName server_name = 4;</code>
-       *
-       * <pre>
-       * The region server where the transition will happen or is happening
-       * </pre>
-       */
-      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getServerNameBuilder() {
-        bitField0_ |= 0x00000008;
-        onChanged();
-        return getServerNameFieldBuilder().getBuilder();
-      }
-      /**
-       * <code>required .ServerName server_name = 4;</code>
-       *
-       * <pre>
-       * The region server where the transition will happen or is happening
-       * </pre>
-       */
-      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder() {
-        if (serverNameBuilder_ != null) {
-          return serverNameBuilder_.getMessageOrBuilder();
-        } else {
-          return serverName_;
-        }
-      }
-      /**
-       * <code>required .ServerName server_name = 4;</code>
-       *
-       * <pre>
-       * The region server where the transition will happen or is happening
-       * </pre>
- */ - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> - getServerNameFieldBuilder() { - if (serverNameBuilder_ == null) { - serverNameBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>( - serverName_, - getParentForChildren(), - isClean()); - serverName_ = null; - } - return serverNameBuilder_; - } - - // optional bytes payload = 5; - private com.google.protobuf.ByteString payload_ = com.google.protobuf.ByteString.EMPTY; - /** - * optional bytes payload = 5; - */ - public boolean hasPayload() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - /** - * optional bytes payload = 5; - */ - public com.google.protobuf.ByteString getPayload() { - return payload_; - } - /** - * optional bytes payload = 5; - */ - public Builder setPayload(com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000010; - payload_ = value; - onChanged(); - return this; - } - /** - * optional bytes payload = 5; - */ - public Builder clearPayload() { - bitField0_ = (bitField0_ & ~0x00000010); - payload_ = getDefaultInstance().getPayload(); - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:RegionTransition) - } - - static { - defaultInstance = new RegionTransition(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:RegionTransition) - } - public interface SplitLogTaskOrBuilder extends com.google.protobuf.MessageOrBuilder { @@ -10448,11 +9361,6 @@ public final class ZooKeeperProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_ClusterUp_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor - internal_static_RegionTransition_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_RegionTransition_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor internal_static_SplitLogTask_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable @@ -10510,38 +9418,35 @@ public final class ZooKeeperProtos { "gionServer\022\033\n\006server\030\001 \002(\0132\013.ServerName\022" + "\023\n\013rpc_version\030\002 \001(\r\":\n\006Master\022\033\n\006master" + "\030\001 \002(\0132\013.ServerName\022\023\n\013rpc_version\030\002 \001(\r" + - "\"\037\n\tClusterUp\022\022\n\nstart_date\030\001 \002(\t\"\210\001\n\020Re" + - "gionTransition\022\027\n\017event_type_code\030\001 \002(\r\022" + - "\023\n\013region_name\030\002 \002(\014\022\023\n\013create_time\030\003 \002(" + - "\004\022 \n\013server_name\030\004 \002(\0132\013.ServerName\022\017\n\007p" + - "ayload\030\005 \001(\014\"\214\002\n\014SplitLogTask\022\"\n\005state\030\001" + - " \002(\0162\023.SplitLogTask.State\022 \n\013server_name", - "\030\002 \002(\0132\013.ServerName\0221\n\004mode\030\003 \001(\0162\032.Spli" + - "tLogTask.RecoveryMode:\007UNKNOWN\"C\n\005State\022" + - "\016\n\nUNASSIGNED\020\000\022\t\n\005OWNED\020\001\022\014\n\010RESIGNED\020\002" + - "\022\010\n\004DONE\020\003\022\007\n\003ERR\020\004\">\n\014RecoveryMode\022\013\n\007U" + - 
"NKNOWN\020\000\022\021\n\rLOG_SPLITTING\020\001\022\016\n\nLOG_REPLA" + - "Y\020\002\"n\n\005Table\022$\n\005state\030\001 \002(\0162\014.Table.Stat" + - "e:\007ENABLED\"?\n\005State\022\013\n\007ENABLED\020\000\022\014\n\010DISA" + - "BLED\020\001\022\r\n\tDISABLING\020\002\022\014\n\010ENABLING\020\003\"\215\001\n\017" + - "ReplicationPeer\022\022\n\nclusterkey\030\001 \002(\t\022\037\n\027r" + - "eplicationEndpointImpl\030\002 \001(\t\022\035\n\004data\030\003 \003", - "(\0132\017.BytesBytesPair\022&\n\rconfiguration\030\004 \003" + - "(\0132\017.NameStringPair\"^\n\020ReplicationState\022" + - "&\n\005state\030\001 \002(\0162\027.ReplicationState.State\"" + - "\"\n\005State\022\013\n\007ENABLED\020\000\022\014\n\010DISABLED\020\001\"+\n\027R" + - "eplicationHLogPosition\022\020\n\010position\030\001 \002(\003" + - "\"%\n\017ReplicationLock\022\022\n\nlock_owner\030\001 \002(\t\"" + - "\230\001\n\tTableLock\022\036\n\ntable_name\030\001 \001(\0132\n.Tabl" + - "eName\022\037\n\nlock_owner\030\002 \001(\0132\013.ServerName\022\021" + - "\n\tthread_id\030\003 \001(\003\022\021\n\tis_shared\030\004 \001(\010\022\017\n\007" + - "purpose\030\005 \001(\t\022\023\n\013create_time\030\006 \001(\003\";\n\017St", - "oreSequenceId\022\023\n\013family_name\030\001 \002(\014\022\023\n\013se" + - "quence_id\030\002 \002(\004\"g\n\026RegionStoreSequenceId" + - "s\022 \n\030last_flushed_sequence_id\030\001 \002(\004\022+\n\021s" + - "tore_sequence_id\030\002 \003(\0132\020.StoreSequenceId" + - "BE\n*org.apache.hadoop.hbase.protobuf.gen" + - "eratedB\017ZooKeeperProtosH\001\210\001\001\240\001\001" + "\"\037\n\tClusterUp\022\022\n\nstart_date\030\001 \002(\t\"\214\002\n\014Sp" + + "litLogTask\022\"\n\005state\030\001 \002(\0162\023.SplitLogTask" + + ".State\022 \n\013server_name\030\002 \002(\0132\013.ServerName" + + "\0221\n\004mode\030\003 \001(\0162\032.SplitLogTask.RecoveryMo" + + "de:\007UNKNOWN\"C\n\005State\022\016\n\nUNASSIGNED\020\000\022\t\n\005" + + "OWNED\020\001\022\014\n\010RESIGNED\020\002\022\010\n\004DONE\020\003\022\007\n\003ERR\020\004", + "\">\n\014RecoveryMode\022\013\n\007UNKNOWN\020\000\022\021\n\rLOG_SPL" + + "ITTING\020\001\022\016\n\nLOG_REPLAY\020\002\"n\n\005Table\022$\n\005sta" + + "te\030\001 \002(\0162\014.Table.State:\007ENABLED\"?\n\005State" + + "\022\013\n\007ENABLED\020\000\022\014\n\010DISABLED\020\001\022\r\n\tDISABLING" + + "\020\002\022\014\n\010ENABLING\020\003\"\215\001\n\017ReplicationPeer\022\022\n\n" + + "clusterkey\030\001 \002(\t\022\037\n\027replicationEndpointI" + + "mpl\030\002 \001(\t\022\035\n\004data\030\003 \003(\0132\017.BytesBytesPair" + + "\022&\n\rconfiguration\030\004 \003(\0132\017.NameStringPair" + + "\"^\n\020ReplicationState\022&\n\005state\030\001 \002(\0162\027.Re" + + "plicationState.State\"\"\n\005State\022\013\n\007ENABLED", + "\020\000\022\014\n\010DISABLED\020\001\"+\n\027ReplicationHLogPosit" + + "ion\022\020\n\010position\030\001 \002(\003\"%\n\017ReplicationLock" + + "\022\022\n\nlock_owner\030\001 \002(\t\"\230\001\n\tTableLock\022\036\n\nta" + + "ble_name\030\001 \001(\0132\n.TableName\022\037\n\nlock_owner" + + "\030\002 \001(\0132\013.ServerName\022\021\n\tthread_id\030\003 \001(\003\022\021" + + "\n\tis_shared\030\004 \001(\010\022\017\n\007purpose\030\005 \001(\t\022\023\n\013cr" + + "eate_time\030\006 \001(\003\";\n\017StoreSequenceId\022\023\n\013fa" + + "mily_name\030\001 \002(\014\022\023\n\013sequence_id\030\002 \002(\004\"g\n\026" + + "RegionStoreSequenceIds\022 \n\030last_flushed_s" + + "equence_id\030\001 
\002(\004\022+\n\021store_sequence_id\030\002 ", + "\003(\0132\020.StoreSequenceIdBE\n*org.apache.hado" + + "op.hbase.protobuf.generatedB\017ZooKeeperPr" + + "otosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -10566,62 +9471,56 @@ public final class ZooKeeperProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ClusterUp_descriptor, new java.lang.String[] { "StartDate", }); - internal_static_RegionTransition_descriptor = - getDescriptor().getMessageTypes().get(3); - internal_static_RegionTransition_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_RegionTransition_descriptor, - new java.lang.String[] { "EventTypeCode", "RegionName", "CreateTime", "ServerName", "Payload", }); internal_static_SplitLogTask_descriptor = - getDescriptor().getMessageTypes().get(4); + getDescriptor().getMessageTypes().get(3); internal_static_SplitLogTask_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_SplitLogTask_descriptor, new java.lang.String[] { "State", "ServerName", "Mode", }); internal_static_Table_descriptor = - getDescriptor().getMessageTypes().get(5); + getDescriptor().getMessageTypes().get(4); internal_static_Table_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_Table_descriptor, new java.lang.String[] { "State", }); internal_static_ReplicationPeer_descriptor = - getDescriptor().getMessageTypes().get(6); + getDescriptor().getMessageTypes().get(5); internal_static_ReplicationPeer_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ReplicationPeer_descriptor, new java.lang.String[] { "Clusterkey", "ReplicationEndpointImpl", "Data", "Configuration", }); internal_static_ReplicationState_descriptor = - getDescriptor().getMessageTypes().get(7); + getDescriptor().getMessageTypes().get(6); internal_static_ReplicationState_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ReplicationState_descriptor, new java.lang.String[] { "State", }); internal_static_ReplicationHLogPosition_descriptor = - getDescriptor().getMessageTypes().get(8); + getDescriptor().getMessageTypes().get(7); internal_static_ReplicationHLogPosition_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ReplicationHLogPosition_descriptor, new java.lang.String[] { "Position", }); internal_static_ReplicationLock_descriptor = - getDescriptor().getMessageTypes().get(9); + getDescriptor().getMessageTypes().get(8); internal_static_ReplicationLock_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ReplicationLock_descriptor, new java.lang.String[] { "LockOwner", }); internal_static_TableLock_descriptor = - getDescriptor().getMessageTypes().get(10); + getDescriptor().getMessageTypes().get(9); internal_static_TableLock_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_TableLock_descriptor, new java.lang.String[] { "TableName", "LockOwner", "ThreadId", "IsShared", "Purpose", "CreateTime", }); internal_static_StoreSequenceId_descriptor = - getDescriptor().getMessageTypes().get(11); + getDescriptor().getMessageTypes().get(10); internal_static_StoreSequenceId_fieldAccessorTable = new 
com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_StoreSequenceId_descriptor, new java.lang.String[] { "FamilyName", "SequenceId", }); internal_static_RegionStoreSequenceIds_descriptor = - getDescriptor().getMessageTypes().get(12); + getDescriptor().getMessageTypes().get(11); internal_static_RegionStoreSequenceIds_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_RegionStoreSequenceIds_descriptor, http://git-wip-us.apache.org/repos/asf/hbase/blob/17dff681/hbase-protocol/src/main/protobuf/ZooKeeper.proto ---------------------------------------------------------------------- diff --git a/hbase-protocol/src/main/protobuf/ZooKeeper.proto b/hbase-protocol/src/main/protobuf/ZooKeeper.proto index 598385c..4d727c6 100644 --- a/hbase-protocol/src/main/protobuf/ZooKeeper.proto +++ b/hbase-protocol/src/main/protobuf/ZooKeeper.proto @@ -59,21 +59,6 @@ message ClusterUp { } /** - * What we write under unassigned up in zookeeper as a region moves through - * open/close, etc., regions. Details a region in transition. - */ -message RegionTransition { - // Code for EventType gotten by doing o.a.h.h.EventHandler.EventType.getCode() - required uint32 event_type_code = 1; - // Full regionname in bytes - required bytes region_name = 2; - required uint64 create_time = 3; - // The region server where the transition will happen or is happening - required ServerName server_name = 4; - optional bytes payload = 5; -} - -/** * WAL SplitLog directory znodes have this for content. Used doing distributed * WAL splitting. Holds current state and name of server that originated split. */ http://git-wip-us.apache.org/repos/asf/hbase/blob/17dff681/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/BaseCoordinatedStateManager.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/BaseCoordinatedStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/BaseCoordinatedStateManager.java index 9c9bfba..1891941 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/BaseCoordinatedStateManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/BaseCoordinatedStateManager.java @@ -52,24 +52,4 @@ public abstract class BaseCoordinatedStateManager implements CoordinatedStateMan @Override public abstract TableStateManager getTableStateManager() throws InterruptedException, CoordinatedStateException; - - /** - * Method to retrieve coordination for split transaction. - */ - abstract public SplitTransactionCoordination getSplitTransactionCoordination(); - - /** - * Method to retrieve coordination for closing region operations. - */ - public abstract CloseRegionCoordination getCloseRegionCoordination(); - - /** - * Method to retrieve coordination for opening region operations. 
- */ - public abstract OpenRegionCoordination getOpenRegionCoordination(); - - /** - * Method to retrieve coordination for region merge transaction - */ - public abstract RegionMergeCoordination getRegionMergeCoordination(); } \ No newline at end of file http://git-wip-us.apache.org/repos/asf/hbase/blob/17dff681/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/CloseRegionCoordination.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/CloseRegionCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/CloseRegionCoordination.java deleted file mode 100644 index 503e4fc..0000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/CloseRegionCoordination.java +++ /dev/null @@ -1,69 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.coordination; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos; -import org.apache.hadoop.hbase.regionserver.HRegion; - -/** - * Coordinated operations for close region handlers. - */ -@InterfaceAudience.Private -public interface CloseRegionCoordination { - - /** - * Called before actual region closing to check that we can do close operation - * on this region. - * @param regionInfo region being closed - * @param crd details about closing operation - * @return true if caller shall proceed and close, false if need to abort closing. - */ - boolean checkClosingState(HRegionInfo regionInfo, CloseRegionDetails crd); - - /** - * Called after region is closed to notify all interesting parties / "register" - * region as finally closed. - * @param region region being closed - * @param sn ServerName on which task runs - * @param crd details about closing operation - */ - void setClosedState(HRegion region, ServerName sn, CloseRegionDetails crd); - - /** - * Construct CloseRegionDetails instance from CloseRegionRequest. - * @return instance of CloseRegionDetails - */ - CloseRegionDetails parseFromProtoRequest(AdminProtos.CloseRegionRequest request); - - /** - * Get details object with params for case when we're closing on - * regionserver side internally (not because of RPC call from master), - * so we don't parse details from protobuf request. - */ - CloseRegionDetails getDetaultDetails(); - - /** - * Marker interface for region closing tasks. Used to carry implementation details in - * encapsulated way through Handlers to the consensus API. 
- */ - static interface CloseRegionDetails { - } -} http://git-wip-us.apache.org/repos/asf/hbase/blob/17dff681/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/OpenRegionCoordination.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/OpenRegionCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/OpenRegionCoordination.java deleted file mode 100644 index 0c6871d..0000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/OpenRegionCoordination.java +++ /dev/null @@ -1,129 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.coordination; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.master.AssignmentManager; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos; -import org.apache.hadoop.hbase.regionserver.HRegion; -import org.apache.hadoop.hbase.regionserver.RegionServerServices; - -import java.io.IOException; - -/** - * Cocoordination operations for opening regions. - */ -@InterfaceAudience.Private -public interface OpenRegionCoordination { - - //--------------------- - // RS-side operations - //--------------------- - /** - * Tries to move regions to OPENED state. - * - * @param r Region we're working on. - * @param ord details about region opening task - * @return whether transition was successful or not - * @throws java.io.IOException - */ - boolean transitionToOpened(HRegion r, OpenRegionDetails ord) throws IOException; - - /** - * Transitions region from offline to opening state. - * @param regionInfo region we're working on. - * @param ord details about opening task. - * @return true if successful, false otherwise - */ - boolean transitionFromOfflineToOpening(HRegionInfo regionInfo, - OpenRegionDetails ord); - - /** - * Heartbeats to prevent timeouts. - * - * @param ord details about opening task. - * @param regionInfo region we're working on. - * @param rsServices instance of RegionServerrServices - * @param context used for logging purposes only - * @return true if successful heartbeat, false otherwise. - */ - boolean tickleOpening(OpenRegionDetails ord, HRegionInfo regionInfo, - RegionServerServices rsServices, String context); - - /** - * Tries transition region from offline to failed open. 
- * @param rsServices instance of RegionServerServices - * @param hri region we're working on - * @param ord details about region opening task - * @return true if successful, false otherwise - */ - boolean tryTransitionFromOfflineToFailedOpen(RegionServerServices rsServices, - HRegionInfo hri, OpenRegionDetails ord); - - /** - * Tries transition from Opening to Failed open. - * @param hri region we're working on - * @param ord details about region opening task - * @return true if successfu. false otherwise. - */ - boolean tryTransitionFromOpeningToFailedOpen(HRegionInfo hri, OpenRegionDetails ord); - - /** - * Construct OpenRegionDetails instance from part of protobuf request. - * @return instance of OpenRegionDetails. - */ - OpenRegionDetails parseFromProtoRequest(AdminProtos.OpenRegionRequest.RegionOpenInfo - regionOpenInfo); - - /** - * Get details object with params for case when we're opening on - * regionserver side with all "default" properties. - */ - OpenRegionDetails getDetailsForNonCoordinatedOpening(); - - //------------------------- - // HMaster-side operations - //------------------------- - - /** - * Commits opening operation on HM side (steps required for "commit" - * are determined by coordination implementation). - * @return true if committed successfully, false otherwise. - */ - public boolean commitOpenOnMasterSide(AssignmentManager assignmentManager, - HRegionInfo regionInfo, - OpenRegionDetails ord); - - /** - * Interface for region opening tasks. Used to carry implementation details in - * encapsulated way through Handlers to the coordination API. - */ - static interface OpenRegionDetails { - /** - * Sets server name on which opening operation is running. - */ - void setServerName(ServerName serverName); - - /** - * @return server name on which opening op is running. - */ - ServerName getServerName(); - } -} http://git-wip-us.apache.org/repos/asf/hbase/blob/17dff681/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/RegionMergeCoordination.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/RegionMergeCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/RegionMergeCoordination.java deleted file mode 100644 index b51dd9c..0000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/RegionMergeCoordination.java +++ /dev/null @@ -1,106 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. You may obtain a - * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable - * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License - * for the specific language governing permissions and limitations under the License. 
- */
-
-package org.apache.hadoop.hbase.coordination;
-
-import java.io.IOException;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.Server;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.RegionMergeTransaction;
-import org.apache.hadoop.hbase.regionserver.RegionServerServices;
-
-/**
- * Coordination operations for region merge transaction. The operation should be coordinated at the
- * following stages:<br>
- * 1. startRegionMergeTransaction - all preparation/initialization for merge region transaction<br>
- * 2. waitForRegionMergeTransaction - wait until coordination complete all works related
- * to merge<br>
- * 3. confirmRegionMergeTransaction - confirm that the merge could be completed and none of merging
- * regions moved somehow<br>
- * 4. completeRegionMergeTransaction - all steps that are required to complete the transaction.
- * Called after PONR (point of no return)<br>
- */ -@InterfaceAudience.Private -public interface RegionMergeCoordination { - - RegionMergeDetails getDefaultDetails(); - - /** - * Dummy interface for region merge transaction details. - */ - public static interface RegionMergeDetails { - } - - /** - * Start the region merge transaction - * @param region region to be created as offline - * @param serverName server event originates from - * @throws IOException - */ - void startRegionMergeTransaction(HRegionInfo region, ServerName serverName, HRegionInfo a, - HRegionInfo b) throws IOException; - - /** - * Get everything ready for region merge - * @throws IOException - */ - void waitForRegionMergeTransaction(RegionServerServices services, HRegionInfo mergedRegionInfo, - HRegion region_a, HRegion region_b, RegionMergeDetails details) throws IOException; - - /** - * Confirm that the region merge can be performed - * @param merged region - * @param a merging region A - * @param b merging region B - * @param serverName server event originates from - * @param rmd region merge details - * @throws IOException If thrown, transaction failed. - */ - void confirmRegionMergeTransaction(HRegionInfo merged, HRegionInfo a, HRegionInfo b, - ServerName serverName, RegionMergeDetails rmd) throws IOException; - - /** - * @param merged region - * @param a merging region A - * @param b merging region B - * @param serverName server event originates from - * @param rmd region merge details - * @throws IOException - */ - void processRegionMergeRequest(HRegionInfo merged, HRegionInfo a, HRegionInfo b, - ServerName serverName, RegionMergeDetails rmd) throws IOException; - - /** - * Finish off merge transaction - * @param services Used to online/offline regions. - * @param merged region - * @param region_a merging region A - * @param region_b merging region B - * @param rmd region merge details - * @param mergedRegion - * @throws IOException If thrown, transaction failed. Call - * {@link RegionMergeTransaction#rollback(Server, RegionServerServices)} - */ - void completeRegionMergeTransaction(RegionServerServices services, HRegionInfo merged, - HRegion region_a, HRegion region_b, RegionMergeDetails rmd, HRegion mergedRegion) - throws IOException; - - /** - * This method is used during rollback - * @param merged region to be rolled back - */ - void clean(HRegionInfo merged); - -} http://git-wip-us.apache.org/repos/asf/hbase/blob/17dff681/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitTransactionCoordination.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitTransactionCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitTransactionCoordination.java deleted file mode 100644 index 659d4e5..0000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitTransactionCoordination.java +++ /dev/null @@ -1,101 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
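To make the four-stage contract above concrete, here is a minimal driver sketch. MergeDriverSketch and all of its parameters are hypothetical; only the RegionMergeCoordination calls come from the interface being removed.

    import java.io.IOException;

    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.coordination.RegionMergeCoordination;
    import org.apache.hadoop.hbase.coordination.RegionMergeCoordination.RegionMergeDetails;
    import org.apache.hadoop.hbase.regionserver.HRegion;
    import org.apache.hadoop.hbase.regionserver.RegionServerServices;

    class MergeDriverSketch {
      void merge(RegionMergeCoordination coord, RegionServerServices services,
          HRegionInfo mergedInfo, HRegion regionA, HRegion regionB,
          HRegion mergedRegion, ServerName sn) throws IOException {
        RegionMergeDetails rmd = coord.getDefaultDetails();
        HRegionInfo a = regionA.getRegionInfo();
        HRegionInfo b = regionB.getRegionInfo();
        // 1. announce the merge and do coordination-side preparation
        coord.startRegionMergeTransaction(mergedInfo, sn, a, b);
        // 2. wait until the coordination has completed its share of the work
        coord.waitForRegionMergeTransaction(services, mergedInfo, regionA, regionB, rmd);
        // 3. confirm that neither merging region has moved in the meantime
        coord.confirmRegionMergeTransaction(mergedInfo, a, b, sn, rmd);
        // 4. after the PONR; on failure call RegionMergeTransaction#rollback
        coord.completeRegionMergeTransaction(services, mergedInfo, regionA, regionB,
            rmd, mergedRegion);
      }
    }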
http://git-wip-us.apache.org/repos/asf/hbase/blob/17dff681/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitTransactionCoordination.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitTransactionCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitTransactionCoordination.java
deleted file mode 100644
index 659d4e5..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitTransactionCoordination.java
+++ /dev/null
@@ -1,101 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.coordination;
-
-import java.io.IOException;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.Server;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.RegionServerServices;
-import org.apache.hadoop.hbase.regionserver.SplitTransaction;
-
-/**
- * Coordination operations for a split transaction. The split operation should be coordinated at
- * the following stages:
- * 1. startSplitTransaction - all preparation/initialization for the split transaction should be
- * done there.
- * 2. waitForSplitTransaction - the coordination should perform all logic related to the split
- * transaction and wait until it's finished
- * 3. completeSplitTransaction - all steps that are required to complete the transaction.
- * Called after the PONR (point of no return)
- */
-@InterfaceAudience.Private
-public interface SplitTransactionCoordination {
-
-  /**
-   * Dummy interface for split transaction details.
-   */
-  public static interface SplitTransactionDetails {
-  }
-
-  SplitTransactionDetails getDefaultDetails();
-
-  /**
-   * Init coordination for the split transaction.
-   * @param parent region to be created as offline
-   * @param serverName server event originates from
-   * @param hri_a daughter region
-   * @param hri_b daughter region
-   * @throws IOException
-   */
-  void startSplitTransaction(HRegion parent, ServerName serverName,
-      HRegionInfo hri_a, HRegionInfo hri_b) throws IOException;
-
-  /**
-   * Waits while the coordination processes the transaction.
-   * @param services Used to online/offline regions.
-   * @param parent region
-   * @param hri_a daughter region
-   * @param hri_b daughter region
-   * @param std split transaction details
-   * @throws IOException
-   */
-  void waitForSplitTransaction(final RegionServerServices services,
-      HRegion parent, HRegionInfo hri_a, HRegionInfo hri_b, SplitTransactionDetails std)
-      throws IOException;
-
-  /**
-   * Finishes off the split transaction.
-   * @param services Used to online/offline regions.
-   * @param first daughter region
-   * @param second daughter region
-   * @param std split transaction details
-   * @param parent the parent region
-   * @throws IOException If thrown, transaction failed. Call
-   *           {@link SplitTransaction#rollback(Server, RegionServerServices)}
-   */
-  void completeSplitTransaction(RegionServerServices services, HRegion first,
-      HRegion second, SplitTransactionDetails std, HRegion parent) throws IOException;
-
-  /**
-   * Cleans up the split transaction.
-   * @param hri node to delete
-   */
-  void clean(final HRegionInfo hri);
-
-  /**
-   * Required by AssignmentManager.
-   */
-  int processTransition(HRegionInfo p, HRegionInfo hri_a, HRegionInfo hri_b,
-      ServerName sn, SplitTransactionDetails std) throws IOException;
-}
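The same pattern, sketched for splits. SplitDriverSketch and its parameters are hypothetical; in HBase of this era the actual caller was the SplitTransaction machinery.

    import java.io.IOException;

    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.coordination.SplitTransactionCoordination;
    import org.apache.hadoop.hbase.coordination.SplitTransactionCoordination.SplitTransactionDetails;
    import org.apache.hadoop.hbase.regionserver.HRegion;
    import org.apache.hadoop.hbase.regionserver.RegionServerServices;

    class SplitDriverSketch {
      void split(SplitTransactionCoordination coord, RegionServerServices services,
          HRegion parent, HRegionInfo hriA, HRegionInfo hriB,
          HRegion daughterA, HRegion daughterB, ServerName sn) throws IOException {
        SplitTransactionDetails std = coord.getDefaultDetails();
        // 1. startSplitTransaction: preparation/initialization
        coord.startSplitTransaction(parent, sn, hriA, hriB);
        // 2. waitForSplitTransaction: block until the coordination's work is done
        coord.waitForSplitTransaction(services, parent, hriA, hriB, std);
        // ... the actual store-file split happens here (elided) ...
        // 3. completeSplitTransaction: after the PONR; on failure call
        //    SplitTransaction#rollback(Server, RegionServerServices)
        coord.completeSplitTransaction(services, daughterA, daughterB, std, parent);
      }
    }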
http://git-wip-us.apache.org/repos/asf/hbase/blob/17dff681/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitTransactionCoordination.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitTransactionCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitTransactionCoordination.java
deleted file mode 100644
index de9f51f..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitTransactionCoordination.java
+++ /dev/null
@@ -1,314 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable
- * law or agreed to in writing, software distributed under the License is distributed on an "AS IS"
- * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License
- * for the specific language governing permissions and limitations under the License.
- */
-
-package org.apache.hadoop.hbase.coordination;
-
-import static org.apache.hadoop.hbase.executor.EventType.RS_ZK_REGION_SPLIT;
-import static org.apache.hadoop.hbase.executor.EventType.RS_ZK_REGION_SPLITTING;
-import static org.apache.hadoop.hbase.executor.EventType.RS_ZK_REQUEST_REGION_SPLIT;
-
-import java.io.IOException;
-import java.util.List;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.CoordinatedStateManager;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.RegionTransition;
-import org.apache.hadoop.hbase.Server;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.coordination.SplitTransactionCoordination;
-import org.apache.hadoop.hbase.executor.EventType;
-import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.RegionServerServices;
-import org.apache.hadoop.hbase.regionserver.SplitTransaction;
-import org.apache.hadoop.hbase.zookeeper.ZKAssign;
-import org.apache.hadoop.hbase.zookeeper.ZKUtil;
-import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.data.Stat;
-
-public class ZKSplitTransactionCoordination implements SplitTransactionCoordination {
-
-  private CoordinatedStateManager coordinationManager;
-  private final ZooKeeperWatcher watcher;
-
-  private static final Log LOG = LogFactory.getLog(ZKSplitTransactionCoordination.class);
-
-  public ZKSplitTransactionCoordination(CoordinatedStateManager coordinationProvider,
-      ZooKeeperWatcher watcher) {
-    this.coordinationManager = coordinationProvider;
-    this.watcher = watcher;
-  }
-
-  /**
-   * Creates a new ephemeral node in the PENDING_SPLIT state for the specified region. Creates it
-   * ephemeral in case the regionserver dies mid-split.
-   * <p>
-   * Does not transition nodes from other states. If a node already exists for this region, an
-   * Exception will be thrown.
-   * @param parent region to be created as offline
-   * @param serverName server event originates from
-   * @param hri_a daughter region
-   * @param hri_b daughter region
-   * @throws IOException
-   */
-  @Override
-  public void startSplitTransaction(HRegion parent, ServerName serverName, HRegionInfo hri_a,
-      HRegionInfo hri_b) throws IOException {
-
-    HRegionInfo region = parent.getRegionInfo();
-    try {
-      LOG.debug(watcher.prefix("Creating ephemeral node for " + region.getEncodedName()
-          + " in PENDING_SPLIT state"));
-      byte[] payload = HRegionInfo.toDelimitedByteArray(hri_a, hri_b);
-      RegionTransition rt =
-          RegionTransition.createRegionTransition(RS_ZK_REQUEST_REGION_SPLIT,
-            region.getRegionName(), serverName, payload);
-      String node = ZKAssign.getNodeName(watcher, region.getEncodedName());
-      if (!ZKUtil.createEphemeralNodeAndWatch(watcher, node, rt.toByteArray())) {
-        throw new IOException("Failed create of ephemeral " + node);
-      }
-    } catch (KeeperException e) {
-      throw new IOException("Failed creating PENDING_SPLIT znode on "
-          + parent.getRegionNameAsString(), e);
-    }
-  }
-
-  /**
-   * Transitions an existing ephemeral node for the specified region which is currently in the
-   * begin state to be in the end state. The master cleans up the final SPLIT znode when it reads
-   * it (or if we crash, zk will clean it up).
-   * <p>
-   * Does not transition nodes from other states. If for some reason the node could not be
-   * transitioned, the method returns -1. If the transition is successful, the version of the node
-   * after transition is returned.
-   * <p>
-   * This method can fail and return -1 for three different reasons:
-   * <ul>
-   * <li>Node for this region does not exist</li>
-   * <li>Node for this region is not in the begin state</li>
-   * <li>After verifying the begin state, the update fails because of the wrong version (this
-   * should never actually happen since an RS only does this transition following a transition to
-   * the begin state. If two RS are conflicting, one would fail the original transition to the
-   * begin state and not this transition)</li>
-   * </ul>
-   * <p>
-   * Does not set any watches.
-   * <p>
-   * This method should only be used by a RegionServer when splitting a region.
-   * @param parent region whose znode is being transitioned
-   * @param a Daughter a of split
-   * @param b Daughter b of split
-   * @param serverName server event originates from
-   * @param std split transaction details
-   * @param beginState the expected current state the znode should be in
-   * @param endState the state to transition to
-   * @return version of node after transition, -1 if unsuccessful transition
-   * @throws IOException
-   */
-  private int transitionSplittingNode(HRegionInfo parent, HRegionInfo a, HRegionInfo b,
-      ServerName serverName, SplitTransactionDetails std, final EventType beginState,
-      final EventType endState) throws IOException {
-    ZkSplitTransactionDetails zstd = (ZkSplitTransactionDetails) std;
-    byte[] payload = HRegionInfo.toDelimitedByteArray(a, b);
-    try {
-      return ZKAssign.transitionNode(watcher, parent, serverName, beginState, endState,
-        zstd.getZnodeVersion(), payload);
-    } catch (KeeperException e) {
-      throw new IOException(
-          "Failed transition of splitting node " + parent.getRegionNameAsString(), e);
-    }
-  }
-
-  /**
-   * Waits for the splitting node to be transitioned from pending_split to splitting by the
-   * master. That's how we know the master has processed the event and it's OK for us to move on.
-   * If we don't get any update, we periodically transition the node so that the master gets the
-   * callback. If the node is removed or is no longer in the pending_split state, we abort the
-   * split.
-   */
-  @Override
-  public void waitForSplitTransaction(final RegionServerServices services, HRegion parent,
-      HRegionInfo hri_a, HRegionInfo hri_b, SplitTransactionDetails sptd) throws IOException {
-    ZkSplitTransactionDetails zstd = (ZkSplitTransactionDetails) sptd;
-
-    // After creating the split node, wait for the master to transition it
-    // from PENDING_SPLIT to SPLITTING so that we can move on. We want the master
-    // to know about it so it won't transition any region which is splitting.
-    try {
-      int spins = 0;
-      Stat stat = new Stat();
-      ServerName expectedServer = coordinationManager.getServer().getServerName();
-      String node = parent.getRegionInfo().getEncodedName();
-      while (!(coordinationManager.getServer().isStopped() || services.isStopping())) {
-        if (spins % 5 == 0) {
-          LOG.debug("Still waiting for master to process " + "the pending_split for " + node);
-          SplitTransactionDetails temp = getDefaultDetails();
-          transitionSplittingNode(parent.getRegionInfo(), hri_a, hri_b, expectedServer, temp,
-            RS_ZK_REQUEST_REGION_SPLIT, RS_ZK_REQUEST_REGION_SPLIT);
-        }
-        Thread.sleep(100);
-        spins++;
-        byte[] data = ZKAssign.getDataNoWatch(watcher, node, stat);
-        if (data == null) {
-          throw new IOException("Data is null, splitting node " + node + " no longer exists");
-        }
-        RegionTransition rt = RegionTransition.parseFrom(data);
-        EventType et = rt.getEventType();
-        if (et == RS_ZK_REGION_SPLITTING) {
-          ServerName serverName = rt.getServerName();
-          if (!serverName.equals(expectedServer)) {
-            throw new IOException("Splitting node " + node + " is for " + serverName + ", not us "
-                + expectedServer);
-          }
-          byte[] payloadOfSplitting = rt.getPayload();
-          List<HRegionInfo> splittingRegions =
-              HRegionInfo.parseDelimitedFrom(payloadOfSplitting, 0, payloadOfSplitting.length);
-          assert splittingRegions.size() == 2;
-          HRegionInfo a = splittingRegions.get(0);
-          HRegionInfo b = splittingRegions.get(1);
-          if (!(hri_a.equals(a) && hri_b.equals(b))) {
-            throw new IOException("Splitting node " + node + " is for " + a + ", " + b
-                + ", not expected daughters: " + hri_a + ", " + hri_b);
-          }
-          // Master has processed it.
-          zstd.setZnodeVersion(stat.getVersion());
-          return;
-        }
-        if (et != RS_ZK_REQUEST_REGION_SPLIT) {
-          throw new IOException("Splitting node " + node + " moved out of splitting to " + et);
-        }
-      }
-      // Server is stopping/stopped
-      throw new IOException("Server is " + (services.isStopping() ? "stopping" : "stopped"));
-    } catch (Exception e) {
-      if (e instanceof InterruptedException) {
-        Thread.currentThread().interrupt();
-      }
-      throw new IOException("Failed getting SPLITTING znode on " + parent.getRegionNameAsString(),
-          e);
-    }
-  }
-
-  /**
-   * Finishes off the split transaction by transitioning the znode.
-   * @param services Used to online/offline regions.
-   * @param a daughter region
-   * @param b daughter region
-   * @param std split transaction details
-   * @param parent the parent region
-   * @throws IOException If thrown, transaction failed. Call
-   *           {@link SplitTransaction#rollback(Server, RegionServerServices)}
-   */
-  @Override
-  public void completeSplitTransaction(final RegionServerServices services, HRegion a, HRegion b,
-      SplitTransactionDetails std, HRegion parent) throws IOException {
-    ZkSplitTransactionDetails zstd = (ZkSplitTransactionDetails) std;
-    // Tell master about split by updating zk. If we fail, abort.
-    if (coordinationManager.getServer() != null) {
-      try {
-        zstd.setZnodeVersion(transitionSplittingNode(parent.getRegionInfo(), a.getRegionInfo(),
-          b.getRegionInfo(), coordinationManager.getServer().getServerName(), zstd,
-          RS_ZK_REGION_SPLITTING, RS_ZK_REGION_SPLIT));
-
-        int spins = 0;
-        // Now wait for the master to process the split. We know it's done
-        // when the znode is deleted. The reason we keep tickling the znode is
-        // that it's possible for the master to miss an event.
-        do {
-          if (spins % 10 == 0) {
-            LOG.debug("Still waiting on the master to process the split for "
-                + parent.getRegionInfo().getEncodedName());
-          }
-          Thread.sleep(100);
-          // When this returns -1 it means the znode doesn't exist
-          zstd.setZnodeVersion(transitionSplittingNode(parent.getRegionInfo(), a.getRegionInfo(),
-            b.getRegionInfo(), coordinationManager.getServer().getServerName(), zstd,
-            RS_ZK_REGION_SPLIT, RS_ZK_REGION_SPLIT));
-          spins++;
-        } while (zstd.getZnodeVersion() != -1 && !coordinationManager.getServer().isStopped()
-            && !services.isStopping());
-      } catch (Exception e) {
-        if (e instanceof InterruptedException) {
-          Thread.currentThread().interrupt();
-        }
-        throw new IOException("Failed telling master about split", e);
-      }
-    }
-
-    // Leaving here, the splitdir with its dross will be in place but since the
-    // split was successful, just leave it; it'll be cleaned when the parent is
-    // deleted and cleaned up.
-  }
-
-  @Override
-  public void clean(final HRegionInfo hri) {
-    try {
-      // Only delete if it's in the expected state; could have been hijacked.
-      if (!ZKAssign.deleteNode(coordinationManager.getServer().getZooKeeper(),
-        hri.getEncodedName(), RS_ZK_REQUEST_REGION_SPLIT, coordinationManager.getServer()
-            .getServerName())) {
-        ZKAssign.deleteNode(coordinationManager.getServer().getZooKeeper(), hri.getEncodedName(),
-          RS_ZK_REGION_SPLITTING, coordinationManager.getServer().getServerName());
-      }
-    } catch (KeeperException.NoNodeException e) {
-      LOG.info("Failed cleanup zk node of " + hri.getRegionNameAsString(), e);
-    } catch (KeeperException e) {
-      coordinationManager.getServer().abort("Failed cleanup of " + hri.getRegionNameAsString(), e);
-    }
-  }
-
-  /**
-   * ZK-based implementation. Carries details about whether the state transition should be
-   * reflected in ZK, as well as the expected version of the znode.
-   */
-  public static class ZkSplitTransactionDetails implements
-      SplitTransactionCoordination.SplitTransactionDetails {
-    private int znodeVersion;
-
-    public ZkSplitTransactionDetails() {
-    }
-
-    /**
-     * @return znode current version
-     */
-    public int getZnodeVersion() {
-      return znodeVersion;
-    }
-
-    /**
-     * @param znodeVersion znode new version
-     */
-    public void setZnodeVersion(int znodeVersion) {
-      this.znodeVersion = znodeVersion;
-    }
-  }
-
-  @Override
-  public SplitTransactionDetails getDefaultDetails() {
-    ZkSplitTransactionDetails zstd = new ZkSplitTransactionDetails();
-    zstd.setZnodeVersion(-1);
-    return zstd;
-  }
-
-  @Override
-  public int processTransition(HRegionInfo p, HRegionInfo hri_a, HRegionInfo hri_b, ServerName sn,
-      SplitTransactionDetails std) throws IOException {
-    return transitionSplittingNode(p, hri_a, hri_b, sn, std, RS_ZK_REQUEST_REGION_SPLIT,
-      RS_ZK_REGION_SPLITTING);
-  }
-}
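Putting the removed ZK handshake end to end: the regionserver creates an ephemeral znode in RS_ZK_REQUEST_REGION_SPLIT, re-tickles it until the master flips it to RS_ZK_REGION_SPLITTING, performs the split, then transitions it to RS_ZK_REGION_SPLIT and keeps tickling until the master deletes it. A condensed lifecycle sketch follows; ZkSplitLifecycleSketch and its parameters are hypothetical, and only the ZKSplitTransactionCoordination calls come from the class above.

    import java.io.IOException;

    import org.apache.hadoop.hbase.CoordinatedStateManager;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.coordination.SplitTransactionCoordination.SplitTransactionDetails;
    import org.apache.hadoop.hbase.coordination.ZKSplitTransactionCoordination;
    import org.apache.hadoop.hbase.regionserver.HRegion;
    import org.apache.hadoop.hbase.regionserver.RegionServerServices;
    import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;

    class ZkSplitLifecycleSketch {
      void run(CoordinatedStateManager csm, ZooKeeperWatcher watcher,
          RegionServerServices services, HRegion parent, HRegion daughterA,
          HRegion daughterB, ServerName sn) throws IOException {
        ZKSplitTransactionCoordination coord =
            new ZKSplitTransactionCoordination(csm, watcher);
        SplitTransactionDetails std = coord.getDefaultDetails(); // znode version -1
        HRegionInfo hriA = daughterA.getRegionInfo();
        HRegionInfo hriB = daughterB.getRegionInfo();
        try {
          // Ephemeral znode goes up in RS_ZK_REQUEST_REGION_SPLIT.
          coord.startSplitTransaction(parent, sn, hriA, hriB);
          // Re-tickled every ~500ms until the master flips it to SPLITTING.
          coord.waitForSplitTransaction(services, parent, hriA, hriB, std);
        } catch (IOException e) {
          // Before the PONR we can still back out: remove the transient znode.
          coord.clean(parent.getRegionInfo());
          throw e;
        }
        // ... the actual store-file split happens here (elided) ...
        // Flip to RS_ZK_REGION_SPLIT; keep tickling until the master deletes it.
        coord.completeSplitTransaction(services, daughterA, daughterB, std, parent);
      }
    }

With this cleanup, region transitions are reported to the master directly (the ZK-less assignment path) rather than through these znodes.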