hbase-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From la...@apache.org
Subject [3/3] hbase git commit: HBASE-12859 New master API to track major compaction completion.
Date Thu, 29 Jan 2015 21:57:22 GMT
HBASE-12859 New master API to track major compaction completion.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1270e590
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1270e590
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1270e590

Branch: refs/heads/master
Commit: 1270e590d1380abc500bf76f9e1e91898c967902
Parents: 6bfa8ea
Author: Lars Hofhansl <larsh@apache.org>
Authored: Thu Jan 29 13:58:27 2015 -0800
Committer: Lars Hofhansl <larsh@apache.org>
Committed: Thu Jan 29 13:58:27 2015 -0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hbase/ClusterStatus.java  |   23 +
 .../org/apache/hadoop/hbase/RegionLoad.java     |   12 +-
 .../org/apache/hadoop/hbase/client/Admin.java   |   26 +
 .../hadoop/hbase/client/ConnectionManager.java  |   17 +
 .../apache/hadoop/hbase/client/HBaseAdmin.java  |   32 +
 .../hadoop/hbase/io/hfile/HFileContext.java     |   16 +-
 .../hbase/io/hfile/HFileContextBuilder.java     |    9 +-
 .../protobuf/generated/ClusterStatusProtos.java |  135 +-
 .../hbase/protobuf/generated/MasterProtos.java  | 1930 +++++++++++++++++-
 .../src/main/protobuf/ClusterStatus.proto       |    2 +
 hbase-protocol/src/main/protobuf/Master.proto   |   20 +
 .../hbase/io/hfile/AbstractHFileWriter.java     |    3 +
 .../org/apache/hadoop/hbase/io/hfile/HFile.java |    1 +
 .../hadoop/hbase/io/hfile/HFileReaderV2.java    |    1 +
 .../org/apache/hadoop/hbase/master/HMaster.java |   10 +
 .../hadoop/hbase/master/MasterRpcServices.java  |   39 +-
 .../hadoop/hbase/master/MasterServices.java     |   16 +
 .../hadoop/hbase/regionserver/HRegion.java      |   23 +
 .../hbase/regionserver/HRegionServer.java       |    8 +-
 .../hadoop/hbase/regionserver/HStore.java       |    1 +
 .../apache/hadoop/hbase/client/TestAdmin1.java  |   52 +
 .../hadoop/hbase/master/TestCatalogJanitor.java |   12 +
 22 files changed, 2272 insertions(+), 116 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/1270e590/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
index b93312a..2791a04 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
@@ -268,6 +268,29 @@ public class ClusterStatus extends VersionedWritable {
     return masterCoprocessors;
   }
 
+  public long getLastMajorCompactionTsForTable(TableName table) {
+    long result = Long.MAX_VALUE;
+    for (ServerName server : getServers()) {
+      ServerLoad load = getLoad(server);
+      for (RegionLoad rl : load.getRegionsLoad().values()) {
+        if (table.equals(HRegionInfo.getTable(rl.getName()))) {
+          result = Math.min(result, rl.getLastMajorCompactionTs());
+        }
+      }
+    }
+    return result == Long.MAX_VALUE ? 0 : result;
+  }
+
+  public long getLastMajorCompactionTsForRegion(final byte[] region) {
+    for (ServerName server : getServers()) {
+      ServerLoad load = getLoad(server);
+      RegionLoad rl = load.getRegionsLoad().get(region);
+      if (rl != null) {
+        return rl.getLastMajorCompactionTs();
+      }
+    }
+    return 0;
+  }
 
   public boolean isBalancerOn() {
     return balancerOn != null && balancerOn;

http://git-wip-us.apache.org/repos/asf/hbase/blob/1270e590/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java
index 234c5ae..794e8b2 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java
@@ -169,6 +169,14 @@ public class RegionLoad {
     }
     return 0.0f;
   }
+
+  /**
+   * @return the timestamp of the oldest hfile for any store of this region.
+   */
+  public long getLastMajorCompactionTs() {
+    return regionLoadPB.getLastMajorCompactionTs();
+  }
+
   /**
    * @see java.lang.Object#toString()
    */
@@ -179,7 +187,9 @@ public class RegionLoad {
     sb = Strings.appendKeyValue(sb, "numberOfStorefiles",
         this.getStorefiles());
     sb = Strings.appendKeyValue(sb, "storefileUncompressedSizeMB",
-        this.getStoreUncompressedSizeMB());
+      this.getStoreUncompressedSizeMB());
+    sb = Strings.appendKeyValue(sb, "lastMajorCompactionTimestamp",
+      this.getLastMajorCompactionTs());
     sb = Strings.appendKeyValue(sb, "storefileSizeMB",
         this.getStorefileSizeMB());
     if (this.getStoreUncompressedSizeMB() != 0) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/1270e590/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index c5d9556..70ed231 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -958,6 +958,32 @@ public interface Admin extends Abortable, Closeable {
     final byte[] regionName) throws IOException;
 
   /**
+   * Get the timestamp of the last major compaction for the passed table
+   *
+   * The timestamp of the oldest HFile resulting from a major compaction of that table,
+   * or 0 if no such HFile could be found.
+   *
+   * @param tableName table to examine
+   * @return the last major compaction timestamp or 0
+   * @throws IOException if a remote or network exception occurs
+   */
+  long getLastMajorCompactionTimestamp(final TableName tableName)
+    throws IOException;
+
+  /**
+   * Get the timestamp of the last major compaction for the passed region.
+   *
+   * The timestamp of the oldest HFile resulting from a major compaction of that region,
+   * or 0 if no such HFile could be found.
+   *
+   * @param regionName region to examine
+   * @return the last major compaction timestamp or 0
+   * @throws IOException if a remote or network exception occurs
+   */
+  long getLastMajorCompactionTimestampForRegion(final byte[] regionName)
+      throws IOException;
+
+  /**
   * Take a snapshot for the given table. If the table is enabled, a FLUSH-type snapshot will be
   * taken. If the table is disabled, an offline snapshot is taken. Snapshots are considered unique
   * based on <b>the name of the snapshot</b>. Attempts to take a snapshot with the same name (even

http://git-wip-us.apache.org/repos/asf/hbase/blob/1270e590/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
index bb1fe7b..c5ddb54 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
@@ -136,6 +136,9 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescript
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnResponse;
@@ -2037,6 +2040,20 @@ final class ConnectionManager {
             throws ServiceException {
           return stub.setQuota(controller, request);
         }
+
+        @Override
+        public MajorCompactionTimestampResponse getLastMajorCompactionTimestamp(
+            RpcController controller, MajorCompactionTimestampRequest request)
+            throws ServiceException {
+          return stub.getLastMajorCompactionTimestamp(controller, request);
+        }
+
+        @Override
+        public MajorCompactionTimestampResponse getLastMajorCompactionTimestampForRegion(
+            RpcController controller, MajorCompactionTimestampForRegionRequest request)
+            throws ServiceException {
+          return stub.getLastMajorCompactionTimestampForRegion(controller, request);
+        }
       };
     }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/1270e590/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index ec0ee43..d14e369 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -83,6 +83,7 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfiguratio
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnRequest;
@@ -115,6 +116,8 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRes
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableRequest;
@@ -3771,4 +3774,33 @@ public class HBaseAdmin implements Admin {
       throw new IOException("Failed to get master info port from MasterAddressTracker", e);
     }
   }
+
+  @Override
+  public long getLastMajorCompactionTimestamp(final TableName tableName) throws IOException {
+    return executeCallable(new MasterCallable<Long>(getConnection()) {
+      @Override
+      public Long call(int callTimeout) throws ServiceException {
+        MajorCompactionTimestampRequest req =
+            MajorCompactionTimestampRequest.newBuilder()
+                .setTableName(ProtobufUtil.toProtoTableName(tableName)).build();
+        return master.getLastMajorCompactionTimestamp(null, req).getCompactionTimestamp();
+      }
+    });
+  }
+
+  @Override
+  public long getLastMajorCompactionTimestampForRegion(final byte[] regionName) throws IOException {
+    return executeCallable(new MasterCallable<Long>(getConnection()) {
+      @Override
+      public Long call(int callTimeout) throws ServiceException {
+        MajorCompactionTimestampForRegionRequest req =
+            MajorCompactionTimestampForRegionRequest
+                .newBuilder()
+                .setRegion(
+                  RequestConverter
+                      .buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName)).build();
+        return master.getLastMajorCompactionTimestampForRegion(null, req).getCompactionTimestamp();
+      }
+    });
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/1270e590/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContext.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContext.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContext.java
index ce8b71a..5f43444 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContext.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContext.java
@@ -56,6 +56,7 @@ public class HFileContext implements HeapSize, Cloneable {
   private DataBlockEncoding encoding = DataBlockEncoding.NONE;
   /** Encryption algorithm and key used */
   private Encryption.Context cryptoContext = Encryption.Context.NONE;
+  private long fileCreateTime;
 
   //Empty constructor.  Go with setters
   public HFileContext() {
@@ -76,12 +77,13 @@ public class HFileContext implements HeapSize, Cloneable {
     this.blocksize = context.blocksize;
     this.encoding = context.encoding;
     this.cryptoContext = context.cryptoContext;
+    this.fileCreateTime = context.fileCreateTime;
   }
 
   public HFileContext(boolean useHBaseChecksum, boolean includesMvcc, boolean includesTags,
       Compression.Algorithm compressAlgo, boolean compressTags, ChecksumType checksumType,
       int bytesPerChecksum, int blockSize, DataBlockEncoding encoding,
-      Encryption.Context cryptoContext) {
+      Encryption.Context cryptoContext, long fileCreateTime) {
     this.usesHBaseChecksum = useHBaseChecksum;
     this.includesMvcc =  includesMvcc;
     this.includesTags = includesTags;
@@ -94,6 +96,7 @@ public class HFileContext implements HeapSize, Cloneable {
       this.encoding = encoding;
     }
     this.cryptoContext = cryptoContext;
+    this.fileCreateTime = fileCreateTime;
   }
 
   /**
@@ -141,6 +144,10 @@ public class HFileContext implements HeapSize, Cloneable {
     this.includesTags = includesTags;
   }
 
+  public void setFileCreateTime(long fileCreateTime) {
+    this.fileCreateTime = fileCreateTime;
+  }
+
   public boolean isCompressTags() {
     return compressTags;
   }
@@ -161,6 +168,10 @@ public class HFileContext implements HeapSize, Cloneable {
     return blocksize;
   }
 
+  public long getFileCreateTime() {
+    return fileCreateTime;
+  }
+
   public DataBlockEncoding getDataBlockEncoding() {
     return encoding;
   }
@@ -189,7 +200,8 @@ public class HFileContext implements HeapSize, Cloneable {
         4 * ClassSize.REFERENCE +
         2 * Bytes.SIZEOF_INT +
         // usesHBaseChecksum, includesMvcc, includesTags and compressTags
-        4 * Bytes.SIZEOF_BOOLEAN);
+        4 * Bytes.SIZEOF_BOOLEAN +
+        Bytes.SIZEOF_LONG);
     return size;
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/1270e590/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContextBuilder.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContextBuilder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContextBuilder.java
index 5c5d75f..0d1e6ef 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContextBuilder.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContextBuilder.java
@@ -52,6 +52,7 @@ public class HFileContextBuilder {
   private DataBlockEncoding encoding = DataBlockEncoding.NONE;
   /** Crypto context */
   private Encryption.Context cryptoContext = Encryption.Context.NONE;
+  private long fileCreateTime = 0;
 
   public HFileContextBuilder withHBaseCheckSum(boolean useHBaseCheckSum) {
     this.usesHBaseChecksum = useHBaseCheckSum;
@@ -103,8 +104,14 @@ public class HFileContextBuilder {
     return this;
   }
 
+  public HFileContextBuilder withCreateTime(long fileCreateTime) {
+    this.fileCreateTime = fileCreateTime;
+    return this;
+  }
+
   public HFileContext build() {
     return new HFileContext(usesHBaseChecksum, includesMvcc, includesTags, compression,
-      compressTags, checksumType, bytesPerChecksum, blocksize, encoding, cryptoContext);
+        compressTags, checksumType, bytesPerChecksum, blocksize, encoding, cryptoContext,
+        fileCreateTime);
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/1270e590/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java
index 5bc44ff..6dc48fa 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java
@@ -2171,6 +2171,16 @@ public final class ClusterStatusProtos {
      * </pre>
      */
     float getDataLocality();
+
+    // optional uint64 last_major_compaction_ts = 17 [default = 0];
+    /**
+     * <code>optional uint64 last_major_compaction_ts = 17 [default = 0];</code>
+     */
+    boolean hasLastMajorCompactionTs();
+    /**
+     * <code>optional uint64 last_major_compaction_ts = 17 [default = 0];</code>
+     */
+    long getLastMajorCompactionTs();
   }
   /**
    * Protobuf type {@code RegionLoad}
@@ -2311,6 +2321,11 @@ public final class ClusterStatusProtos {
               dataLocality_ = input.readFloat();
               break;
             }
+            case 136: {
+              bitField0_ |= 0x00010000;
+              lastMajorCompactionTs_ = input.readUInt64();
+              break;
+            }
           }
         }
       } catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -2753,6 +2768,22 @@ public final class ClusterStatusProtos {
       return dataLocality_;
     }
 
+    // optional uint64 last_major_compaction_ts = 17 [default = 0];
+    public static final int LAST_MAJOR_COMPACTION_TS_FIELD_NUMBER = 17;
+    private long lastMajorCompactionTs_;
+    /**
+     * <code>optional uint64 last_major_compaction_ts = 17 [default = 0];</code>
+     */
+    public boolean hasLastMajorCompactionTs() {
+      return ((bitField0_ & 0x00010000) == 0x00010000);
+    }
+    /**
+     * <code>optional uint64 last_major_compaction_ts = 17 [default = 0];</code>
+     */
+    public long getLastMajorCompactionTs() {
+      return lastMajorCompactionTs_;
+    }
+
     private void initFields() {
       regionSpecifier_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance();
       stores_ = 0;
@@ -2770,6 +2801,7 @@ public final class ClusterStatusProtos {
       totalStaticBloomSizeKB_ = 0;
       completeSequenceId_ = 0L;
       dataLocality_ = 0F;
+      lastMajorCompactionTs_ = 0L;
     }
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
@@ -2839,6 +2871,9 @@ public final class ClusterStatusProtos {
       if (((bitField0_ & 0x00008000) == 0x00008000)) {
         output.writeFloat(16, dataLocality_);
       }
+      if (((bitField0_ & 0x00010000) == 0x00010000)) {
+        output.writeUInt64(17, lastMajorCompactionTs_);
+      }
       getUnknownFields().writeTo(output);
     }
 
@@ -2912,6 +2947,10 @@ public final class ClusterStatusProtos {
         size += com.google.protobuf.CodedOutputStream
           .computeFloatSize(16, dataLocality_);
       }
+      if (((bitField0_ & 0x00010000) == 0x00010000)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeUInt64Size(17, lastMajorCompactionTs_);
+      }
       size += getUnknownFields().getSerializedSize();
       memoizedSerializedSize = size;
       return size;
@@ -3014,6 +3053,11 @@ public final class ClusterStatusProtos {
       if (hasDataLocality()) {
         result = result && (Float.floatToIntBits(getDataLocality())    == Float.floatToIntBits(other.getDataLocality()));
       }
+      result = result && (hasLastMajorCompactionTs() == other.hasLastMajorCompactionTs());
+      if (hasLastMajorCompactionTs()) {
+        result = result && (getLastMajorCompactionTs()
+            == other.getLastMajorCompactionTs());
+      }
       result = result &&
           getUnknownFields().equals(other.getUnknownFields());
       return result;
@@ -3092,6 +3136,10 @@ public final class ClusterStatusProtos {
         hash = (53 * hash) + Float.floatToIntBits(
             getDataLocality());
       }
+      if (hasLastMajorCompactionTs()) {
+        hash = (37 * hash) + LAST_MAJOR_COMPACTION_TS_FIELD_NUMBER;
+        hash = (53 * hash) + hashLong(getLastMajorCompactionTs());
+      }
       hash = (29 * hash) + getUnknownFields().hashCode();
       memoizedHashCode = hash;
       return hash;
@@ -3238,6 +3286,8 @@ public final class ClusterStatusProtos {
         bitField0_ = (bitField0_ & ~0x00004000);
         dataLocality_ = 0F;
         bitField0_ = (bitField0_ & ~0x00008000);
+        lastMajorCompactionTs_ = 0L;
+        bitField0_ = (bitField0_ & ~0x00010000);
         return this;
       }
 
@@ -3334,6 +3384,10 @@ public final class ClusterStatusProtos {
           to_bitField0_ |= 0x00008000;
         }
         result.dataLocality_ = dataLocality_;
+        if (((from_bitField0_ & 0x00010000) == 0x00010000)) {
+          to_bitField0_ |= 0x00010000;
+        }
+        result.lastMajorCompactionTs_ = lastMajorCompactionTs_;
         result.bitField0_ = to_bitField0_;
         onBuilt();
         return result;
@@ -3398,6 +3452,9 @@ public final class ClusterStatusProtos {
         if (other.hasDataLocality()) {
           setDataLocality(other.getDataLocality());
         }
+        if (other.hasLastMajorCompactionTs()) {
+          setLastMajorCompactionTs(other.getLastMajorCompactionTs());
+        }
         this.mergeUnknownFields(other.getUnknownFields());
         return this;
       }
@@ -4337,6 +4394,39 @@ public final class ClusterStatusProtos {
         return this;
       }
 
+      // optional uint64 last_major_compaction_ts = 17 [default = 0];
+      private long lastMajorCompactionTs_ ;
+      /**
+       * <code>optional uint64 last_major_compaction_ts = 17 [default = 0];</code>
+       */
+      public boolean hasLastMajorCompactionTs() {
+        return ((bitField0_ & 0x00010000) == 0x00010000);
+      }
+      /**
+       * <code>optional uint64 last_major_compaction_ts = 17 [default = 0];</code>
+       */
+      public long getLastMajorCompactionTs() {
+        return lastMajorCompactionTs_;
+      }
+      /**
+       * <code>optional uint64 last_major_compaction_ts = 17 [default = 0];</code>
+       */
+      public Builder setLastMajorCompactionTs(long value) {
+        bitField0_ |= 0x00010000;
+        lastMajorCompactionTs_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional uint64 last_major_compaction_ts = 17 [default = 0];</code>
+       */
+      public Builder clearLastMajorCompactionTs() {
+        bitField0_ = (bitField0_ & ~0x00010000);
+        lastMajorCompactionTs_ = 0L;
+        onChanged();
+        return this;
+      }
+
       // @@protoc_insertion_point(builder_scope:RegionLoad)
     }
 
@@ -10472,7 +10562,7 @@ public final class ClusterStatusProtos {
       "PLITTING_NEW\020\r\022\017\n\013MERGING_NEW\020\016\"X\n\022Regio",
       "nInTransition\022\036\n\004spec\030\001 \002(\0132\020.RegionSpec" +
       "ifier\022\"\n\014region_state\030\002 \002(\0132\014.RegionStat" +
-      "e\"\347\003\n\nRegionLoad\022*\n\020region_specifier\030\001 \002" +
+      "e\"\214\004\n\nRegionLoad\022*\n\020region_specifier\030\001 \002" +
       "(\0132\020.RegionSpecifier\022\016\n\006stores\030\002 \001(\r\022\022\n\n" +
       "storefiles\030\003 \001(\r\022\"\n\032store_uncompressed_s" +
       "ize_MB\030\004 \001(\r\022\031\n\021storefile_size_MB\030\005 \001(\r\022" +
@@ -10484,26 +10574,27 @@ public final class ClusterStatusProtos {
       "\030\014 \001(\r\022\"\n\032total_static_index_size_KB\030\r \001" +
       "(\r\022\"\n\032total_static_bloom_size_KB\030\016 \001(\r\022\034" +
       "\n\024complete_sequence_id\030\017 \001(\004\022\025\n\rdata_loc" +
-      "ality\030\020 \001(\002\"\212\002\n\nServerLoad\022\032\n\022number_of_" +
-      "requests\030\001 \001(\r\022 \n\030total_number_of_reques" +
-      "ts\030\002 \001(\r\022\024\n\014used_heap_MB\030\003 \001(\r\022\023\n\013max_he" +
-      "ap_MB\030\004 \001(\r\022!\n\014region_loads\030\005 \003(\0132\013.Regi" +
-      "onLoad\022\"\n\014coprocessors\030\006 \003(\0132\014.Coprocess" +
-      "or\022\031\n\021report_start_time\030\007 \001(\004\022\027\n\017report_",
-      "end_time\030\010 \001(\004\022\030\n\020info_server_port\030\t \001(\r" +
-      "\"O\n\016LiveServerInfo\022\033\n\006server\030\001 \002(\0132\013.Ser" +
-      "verName\022 \n\013server_load\030\002 \002(\0132\013.ServerLoa" +
-      "d\"\340\002\n\rClusterStatus\022/\n\rhbase_version\030\001 \001" +
-      "(\0132\030.HBaseVersionFileContent\022%\n\014live_ser" +
-      "vers\030\002 \003(\0132\017.LiveServerInfo\022!\n\014dead_serv" +
-      "ers\030\003 \003(\0132\013.ServerName\0222\n\025regions_in_tra" +
-      "nsition\030\004 \003(\0132\023.RegionInTransition\022\036\n\ncl" +
-      "uster_id\030\005 \001(\0132\n.ClusterId\022)\n\023master_cop" +
-      "rocessors\030\006 \003(\0132\014.Coprocessor\022\033\n\006master\030",
-      "\007 \001(\0132\013.ServerName\022#\n\016backup_masters\030\010 \003" +
-      "(\0132\013.ServerName\022\023\n\013balancer_on\030\t \001(\010BF\n*" +
-      "org.apache.hadoop.hbase.protobuf.generat" +
-      "edB\023ClusterStatusProtosH\001\240\001\001"
+      "ality\030\020 \001(\002\022#\n\030last_major_compaction_ts\030" +
+      "\021 \001(\004:\0010\"\212\002\n\nServerLoad\022\032\n\022number_of_req" +
+      "uests\030\001 \001(\r\022 \n\030total_number_of_requests\030" +
+      "\002 \001(\r\022\024\n\014used_heap_MB\030\003 \001(\r\022\023\n\013max_heap_" +
+      "MB\030\004 \001(\r\022!\n\014region_loads\030\005 \003(\0132\013.RegionL" +
+      "oad\022\"\n\014coprocessors\030\006 \003(\0132\014.Coprocessor\022",
+      "\031\n\021report_start_time\030\007 \001(\004\022\027\n\017report_end" +
+      "_time\030\010 \001(\004\022\030\n\020info_server_port\030\t \001(\r\"O\n" +
+      "\016LiveServerInfo\022\033\n\006server\030\001 \002(\0132\013.Server" +
+      "Name\022 \n\013server_load\030\002 \002(\0132\013.ServerLoad\"\340" +
+      "\002\n\rClusterStatus\022/\n\rhbase_version\030\001 \001(\0132" +
+      "\030.HBaseVersionFileContent\022%\n\014live_server" +
+      "s\030\002 \003(\0132\017.LiveServerInfo\022!\n\014dead_servers" +
+      "\030\003 \003(\0132\013.ServerName\0222\n\025regions_in_transi" +
+      "tion\030\004 \003(\0132\023.RegionInTransition\022\036\n\nclust" +
+      "er_id\030\005 \001(\0132\n.ClusterId\022)\n\023master_coproc",
+      "essors\030\006 \003(\0132\014.Coprocessor\022\033\n\006master\030\007 \001" +
+      "(\0132\013.ServerName\022#\n\016backup_masters\030\010 \003(\0132" +
+      "\013.ServerName\022\023\n\013balancer_on\030\t \001(\010BF\n*org" +
+      ".apache.hadoop.hbase.protobuf.generatedB" +
+      "\023ClusterStatusProtosH\001\240\001\001"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -10527,7 +10618,7 @@ public final class ClusterStatusProtos {
           internal_static_RegionLoad_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_RegionLoad_descriptor,
-              new java.lang.String[] { "RegionSpecifier", "Stores", "Storefiles", "StoreUncompressedSizeMB", "StorefileSizeMB", "MemstoreSizeMB", "StorefileIndexSizeMB", "ReadRequestsCount", "WriteRequestsCount", "TotalCompactingKVs", "CurrentCompactedKVs", "RootIndexSizeKB", "TotalStaticIndexSizeKB", "TotalStaticBloomSizeKB", "CompleteSequenceId", "DataLocality", });
+              new java.lang.String[] { "RegionSpecifier", "Stores", "Storefiles", "StoreUncompressedSizeMB", "StorefileSizeMB", "MemstoreSizeMB", "StorefileIndexSizeMB", "ReadRequestsCount", "WriteRequestsCount", "TotalCompactingKVs", "CurrentCompactedKVs", "RootIndexSizeKB", "TotalStaticIndexSizeKB", "TotalStaticBloomSizeKB", "CompleteSequenceId", "DataLocality", "LastMajorCompactionTs", });
           internal_static_ServerLoad_descriptor =
             getDescriptor().getMessageTypes().get(3);
           internal_static_ServerLoad_fieldAccessorTable = new


Mime
View raw message