From: apurtell@apache.org
To: commits@hbase.apache.org
Date: Wed, 04 Nov 2015 03:02:51 -0000
Subject: [3/4] hbase git commit: HBASE-12986 Compaction pressure based client pushback

HBASE-12986 Compaction pressure based client pushback

Signed-off-by: Andrew Purtell

Conflicts:
	hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f4dbc5de
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f4dbc5de
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f4dbc5de

Branch: refs/heads/branch-1.2
Commit: f4dbc5de5fe66b083aa111f0b0746227b3a18089
Parents: 3f49bcb
Author: chenheng
Authored: Thu Oct 29 11:54:21 2015 +0800
Committer: Andrew Purtell
Committed: Tue Nov 3 18:44:02 2015 -0800

----------------------------------------------------------------------
 .../backoff/ExponentialClientBackoffPolicy.java |   5 +-
 .../hbase/client/backoff/ServerStatistics.java  |   7 +
 .../client/TestClientExponentialBackoff.java    |  34 +++-
 .../hbase/protobuf/generated/ClientProtos.java  | 179 ++++++++++++++++---
 .../hbase/protobuf/generated/MasterProtos.java  |   2 -
 hbase-protocol/src/main/protobuf/Client.proto   |   2 +
 .../hadoop/hbase/regionserver/HRegion.java      |   2 +
 7 files changed, 196 insertions(+), 35 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/f4dbc5de/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ExponentialClientBackoffPolicy.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ExponentialClientBackoffPolicy.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ExponentialClientBackoffPolicy.java
index 5b1d3d2..b41133a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ExponentialClientBackoffPolicy.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ExponentialClientBackoffPolicy.java
@@ -70,6 +70,9 @@ public class ExponentialClientBackoffPolicy implements ClientBackoffPolicy {
 
     // Factor in heap occupancy
     float heapOccupancy = regionStats.getHeapOccupancyPercent() / 100.0f;
+
+    // Factor in compaction pressure, 1.0 means heavy compaction pressure
+    float compactionPressure = regionStats.getCompactionPressure() / 100.0f;
     if (heapOccupancy >= heapOccupancyLowWatermark) {
       // If we are higher than the high watermark, we are already applying max
       // backoff and cannot scale more (see scale() below)
@@ -80,7 +83,7 @@ public class ExponentialClientBackoffPolicy implements ClientBackoffPolicy {
           scale(heapOccupancy, heapOccupancyLowWatermark, heapOccupancyHighWatermark, 0.1,
               1.0));
     }
-
+    percent = Math.max(percent, compactionPressure);
     // square the percent as a value less than 1. Closer we move to 100 percent,
     // the percent moves to 1, but squaring causes the exponential curve
     double multiplier = Math.pow(percent, 4.0);


http://git-wip-us.apache.org/repos/asf/hbase/blob/f4dbc5de/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ServerStatistics.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ServerStatistics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ServerStatistics.java
index c7519be..2072573 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ServerStatistics.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ServerStatistics.java
@@ -57,10 +57,12 @@ public class ServerStatistics {
   public static class RegionStatistics {
     private int memstoreLoad = 0;
     private int heapOccupancy = 0;
+    private int compactionPressure = 0;
 
     public void update(ClientProtos.RegionLoadStats currentStats) {
       this.memstoreLoad = currentStats.getMemstoreLoad();
       this.heapOccupancy = currentStats.getHeapOccupancy();
+      this.compactionPressure = currentStats.getCompactionPressure();
     }
 
     public int getMemstoreLoadPercent(){
@@ -70,5 +72,10 @@ public class ServerStatistics {
     public int getHeapOccupancyPercent(){
       return this.heapOccupancy;
     }
+
+    public int getCompactionPressure() {
+      return compactionPressure;
+    }
+
   }
 }


http://git-wip-us.apache.org/repos/asf/hbase/blob/f4dbc5de/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientExponentialBackoff.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientExponentialBackoff.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientExponentialBackoff.java
index 01c696c..c594d6e 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientExponentialBackoff.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientExponentialBackoff.java
@@ -113,22 +113,46 @@ public class TestClientExponentialBackoff {
     ServerStatistics stats = new ServerStatistics();
     long backoffTime;
 
-    update(stats, 0, 95);
+    update(stats, 0, 95, 0);
     backoffTime = backoff.getBackoffTime(server, regionname, stats);
     assertTrue("Heap occupancy at low watermark had no effect", backoffTime > 0);
 
     long previous = backoffTime;
-    update(stats, 0, 96);
+    update(stats, 0, 96, 0);
     backoffTime = backoff.getBackoffTime(server, regionname, stats);
     assertTrue("Increase above low watermark should have increased backoff",
         backoffTime > previous);
 
-    update(stats, 0, 98);
+    update(stats, 0, 98, 0);
     backoffTime = backoff.getBackoffTime(server, regionname, stats);
     assertEquals("We should be using max backoff when at high watermark", backoffTime,
         ExponentialClientBackoffPolicy.DEFAULT_MAX_BACKOFF);
   }
 
+  @Test
+  public void testCompactionPressurePolicy() {
+    Configuration conf = new Configuration(false);
+    ExponentialClientBackoffPolicy backoff = new ExponentialClientBackoffPolicy(conf);
+
+    ServerStatistics stats = new ServerStatistics();
+    long backoffTime;
+
+    update(stats, 0, 0, 0);
+    backoffTime = backoff.getBackoffTime(server, regionname, stats);
+    assertTrue("Compaction pressure has no effect", backoffTime == 0);
+
+    long previous = backoffTime;
+    update(stats, 0, 0, 50);
+    backoffTime = backoff.getBackoffTime(server, regionname, stats);
+    assertTrue("Compaction pressure should be bigger",
+        backoffTime > previous);
+
+    update(stats, 0, 0, 100);
+    backoffTime = backoff.getBackoffTime(server, regionname, stats);
+    assertEquals("under heavy compaction pressure", backoffTime,
+        ExponentialClientBackoffPolicy.DEFAULT_MAX_BACKOFF);
+  }
+
   private void update(ServerStatistics stats, int load) {
     ClientProtos.RegionLoadStats stat = ClientProtos.RegionLoadStats.newBuilder()
         .setMemstoreLoad
@@ -136,10 +160,12 @@ public class TestClientExponentialBackoff {
     stats.update(regionname, stat);
   }
 
-  private void update(ServerStatistics stats, int memstoreLoad, int heapOccupancy) {
+  private void update(ServerStatistics stats, int memstoreLoad, int heapOccupancy,
+                      int compactionPressure) {
     ClientProtos.RegionLoadStats stat = ClientProtos.RegionLoadStats.newBuilder()
         .setMemstoreLoad(memstoreLoad)
         .setHeapOccupancy(heapOccupancy)
+        .setCompactionPressure(compactionPressure)
         .build();
     stats.update(regionname, stat);
   }


http://git-wip-us.apache.org/repos/asf/hbase/blob/f4dbc5de/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java
index 681f618..d2c4059 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java
@@ -27621,6 +27621,24 @@ public final class ClientProtos {
      *
      */
     int getHeapOccupancy();
+
+    // optional int32 compactionPressure = 3 [default = 0];
+    /**
+     * optional int32 compactionPressure = 3 [default = 0];
+     *
+     *
+     * Compaction pressure. Guaranteed to be positive, between 0 and 100.
+     * 
+     */
+    boolean hasCompactionPressure();
+    /**
+     * optional int32 compactionPressure = 3 [default = 0];
+     *
+     *
+     * Compaction pressure. Guaranteed to be positive, between 0 and 100.
+     * 
+     */
+    int getCompactionPressure();
   }
   /**
    * Protobuf type {@code RegionLoadStats}
    */
@@ -27688,6 +27706,11 @@ public final class ClientProtos {
             heapOccupancy_ = input.readInt32();
             break;
           }
+          case 24: {
+            bitField0_ |= 0x00000004;
+            compactionPressure_ = input.readInt32();
+            break;
+          }
         }
       }
     } catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -27778,9 +27801,34 @@ public final class ClientProtos {
       return heapOccupancy_;
     }
 
+    // optional int32 compactionPressure = 3 [default = 0];
+    public static final int COMPACTIONPRESSURE_FIELD_NUMBER = 3;
+    private int compactionPressure_;
+    /**
+     * optional int32 compactionPressure = 3 [default = 0];
+     *
+     *
+     * Compaction pressure. Guaranteed to be positive, between 0 and 100.
+     * 
+     */
+    public boolean hasCompactionPressure() {
+      return ((bitField0_ & 0x00000004) == 0x00000004);
+    }
+    /**
+     * optional int32 compactionPressure = 3 [default = 0];
+     *
+     *
+     * Compaction pressure. Guaranteed to be positive, between 0 and 100.
+     * 
+     */
+    public int getCompactionPressure() {
+      return compactionPressure_;
+    }
+
     private void initFields() {
       memstoreLoad_ = 0;
       heapOccupancy_ = 0;
+      compactionPressure_ = 0;
     }
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
@@ -27800,6 +27848,9 @@ public final class ClientProtos {
       if (((bitField0_ & 0x00000002) == 0x00000002)) {
         output.writeInt32(2, heapOccupancy_);
       }
+      if (((bitField0_ & 0x00000004) == 0x00000004)) {
+        output.writeInt32(3, compactionPressure_);
+      }
       getUnknownFields().writeTo(output);
     }
 
@@ -27817,6 +27868,10 @@ public final class ClientProtos {
       if (((bitField0_ & 0x00000002) == 0x00000002)) {
         size += com.google.protobuf.CodedOutputStream
           .computeInt32Size(2, heapOccupancy_);
       }
+      if (((bitField0_ & 0x00000004) == 0x00000004)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeInt32Size(3, compactionPressure_);
+      }
       size += getUnknownFields().getSerializedSize();
       memoizedSerializedSize = size;
       return size;
@@ -27850,6 +27905,11 @@ public final class ClientProtos {
         result = result && (getHeapOccupancy()
             == other.getHeapOccupancy());
       }
+      result = result && (hasCompactionPressure() == other.hasCompactionPressure());
+      if (hasCompactionPressure()) {
+        result = result && (getCompactionPressure()
+            == other.getCompactionPressure());
+      }
       result = result &&
           getUnknownFields().equals(other.getUnknownFields());
       return result;
@@ -27871,6 +27931,10 @@ public final class ClientProtos {
         hash = (37 * hash) + HEAPOCCUPANCY_FIELD_NUMBER;
         hash = (53 * hash) + getHeapOccupancy();
       }
+      if (hasCompactionPressure()) {
+        hash = (37 * hash) + COMPACTIONPRESSURE_FIELD_NUMBER;
+        hash = (53 * hash) + getCompactionPressure();
+      }
       hash = (29 * hash) + getUnknownFields().hashCode();
       memoizedHashCode = hash;
       return hash;
@@ -27989,6 +28053,8 @@ public final class ClientProtos {
         bitField0_ = (bitField0_ & ~0x00000001);
         heapOccupancy_ = 0;
         bitField0_ = (bitField0_ & ~0x00000002);
+        compactionPressure_ = 0;
+        bitField0_ = (bitField0_ & ~0x00000004);
         return this;
       }
 
@@ -28025,6 +28091,10 @@ public final class ClientProtos {
           to_bitField0_ |= 0x00000002;
         }
         result.heapOccupancy_ = heapOccupancy_;
+        if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+          to_bitField0_ |= 0x00000004;
+        }
+        result.compactionPressure_ = compactionPressure_;
         result.bitField0_ = to_bitField0_;
         onBuilt();
         return result;
@@ -28047,6 +28117,9 @@ public final class ClientProtos {
         if (other.hasHeapOccupancy()) {
           setHeapOccupancy(other.getHeapOccupancy());
         }
+        if (other.hasCompactionPressure()) {
+          setCompactionPressure(other.getCompactionPressure());
+        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }
@@ -28176,6 +28249,55 @@ public final class ClientProtos {
         return this;
       }
 
+      // optional int32 compactionPressure = 3 [default = 0];
+      private int compactionPressure_ ;
+      /**
+       * optional int32 compactionPressure = 3 [default = 0];
+       *
+       *
+       * Compaction pressure. Guaranteed to be positive, between 0 and 100.
+       * 
+       */
+      public boolean hasCompactionPressure() {
+        return ((bitField0_ & 0x00000004) == 0x00000004);
+      }
+      /**
+       * optional int32 compactionPressure = 3 [default = 0];
+       *
+       *
+       * Compaction pressure. Guaranteed to be positive, between 0 and 100.
+       * 
+       */
+      public int getCompactionPressure() {
+        return compactionPressure_;
+      }
+      /**
+       * optional int32 compactionPressure = 3 [default = 0];
+       *
+       *
+       * Compaction pressure. Guaranteed to be positive, between 0 and 100.
+       * 
+       */
+      public Builder setCompactionPressure(int value) {
+        bitField0_ |= 0x00000004;
+        compactionPressure_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * optional int32 compactionPressure = 3 [default = 0];
+       *
+       *
+       * Compaction pressure. Guaranteed to be positive, between 0 and 100.
+       * 
+       */
+      public Builder clearCompactionPressure() {
+        bitField0_ = (bitField0_ & ~0x00000004);
+        compactionPressure_ = 0;
+        onChanged();
+        return this;
+      }
+
       // @@protoc_insertion_point(builder_scope:RegionLoadStats)
     }
 
@@ -33431,33 +33553,34 @@ public final class ClientProtos {
       "call\030\004 \001(\0132\027.CoprocessorServiceCall\"Y\n\014R" +
       "egionAction\022 \n\006region\030\001 \002(\0132\020.RegionSpec" +
       "ifier\022\016\n\006atomic\030\002 \001(\010\022\027\n\006action\030\003 \003(\0132\007." +
-      "Action\"D\n\017RegionLoadStats\022\027\n\014memstoreLoa" +
-      "d\030\001 \001(\005:\0010\022\030\n\rheapOccupancy\030\002 \001(\005:\0010\"\266\001\n" +
-      "\021ResultOrException\022\r\n\005index\030\001 \001(\r\022\027\n\006res" +
-      "ult\030\002 \001(\0132\007.Result\022!\n\texception\030\003 \001(\0132\016." +
-      "NameBytesPair\0221\n\016service_result\030\004 \001(\0132\031." +
-      "CoprocessorServiceResult\022#\n\tloadStats\030\005 " +
-      "\001(\0132\020.RegionLoadStats\"f\n\022RegionActionRes",
-      "ult\022-\n\021resultOrException\030\001 \003(\0132\022.ResultO" +
-      "rException\022!\n\texception\030\002 \001(\0132\016.NameByte" +
-      "sPair\"f\n\014MultiRequest\022#\n\014regionAction\030\001 " +
-      "\003(\0132\r.RegionAction\022\022\n\nnonceGroup\030\002 \001(\004\022\035" +
-      "\n\tcondition\030\003 \001(\0132\n.Condition\"S\n\rMultiRe" +
-      "sponse\022/\n\022regionActionResult\030\001 \003(\0132\023.Reg" +
-      "ionActionResult\022\021\n\tprocessed\030\002 \001(\010*\'\n\013Co" +
-      "nsistency\022\n\n\006STRONG\020\000\022\014\n\010TIMELINE\020\0012\205\003\n\r" +
-      "ClientService\022 \n\003Get\022\013.GetRequest\032\014.GetR" +
-      "esponse\022)\n\006Mutate\022\016.MutateRequest\032\017.Muta",
-      "teResponse\022#\n\004Scan\022\014.ScanRequest\032\r.ScanR" +
-      "esponse\022>\n\rBulkLoadHFile\022\025.BulkLoadHFile" +
-      "Request\032\026.BulkLoadHFileResponse\022F\n\013ExecS" +
-      "ervice\022\032.CoprocessorServiceRequest\032\033.Cop" +
-      "rocessorServiceResponse\022R\n\027ExecRegionSer" +
-      "verService\022\032.CoprocessorServiceRequest\032\033" +
-      ".CoprocessorServiceResponse\022&\n\005Multi\022\r.M" +
-      "ultiRequest\032\016.MultiResponseBB\n*org.apach" +
-      "e.hadoop.hbase.protobuf.generatedB\014Clien" +
-      "tProtosH\001\210\001\001\240\001\001"
+      "Action\"c\n\017RegionLoadStats\022\027\n\014memstoreLoa" +
+      "d\030\001 \001(\005:\0010\022\030\n\rheapOccupancy\030\002 \001(\005:\0010\022\035\n\022" +
+      "compactionPressure\030\003 \001(\005:\0010\"\266\001\n\021ResultOr" +
+      "Exception\022\r\n\005index\030\001 \001(\r\022\027\n\006result\030\002 \001(\013" +
+      "2\007.Result\022!\n\texception\030\003 \001(\0132\016.NameBytes" +
+      "Pair\0221\n\016service_result\030\004 \001(\0132\031.Coprocess" +
+      "orServiceResult\022#\n\tloadStats\030\005 \001(\0132\020.Reg",
+      "ionLoadStats\"f\n\022RegionActionResult\022-\n\021re" +
+      "sultOrException\030\001 \003(\0132\022.ResultOrExceptio" +
+      "n\022!\n\texception\030\002 \001(\0132\016.NameBytesPair\"f\n\014" +
+      "MultiRequest\022#\n\014regionAction\030\001 \003(\0132\r.Reg" +
+      "ionAction\022\022\n\nnonceGroup\030\002 \001(\004\022\035\n\tconditi" +
+      "on\030\003 \001(\0132\n.Condition\"S\n\rMultiResponse\022/\n" +
+      "\022regionActionResult\030\001 \003(\0132\023.RegionAction" +
+      "Result\022\021\n\tprocessed\030\002 \001(\010*\'\n\013Consistency" +
+      "\022\n\n\006STRONG\020\000\022\014\n\010TIMELINE\020\0012\205\003\n\rClientSer" +
+      "vice\022 \n\003Get\022\013.GetRequest\032\014.GetResponse\022)",
+      "\n\006Mutate\022\016.MutateRequest\032\017.MutateRespons" +
+      "e\022#\n\004Scan\022\014.ScanRequest\032\r.ScanResponse\022>" +
+      "\n\rBulkLoadHFile\022\025.BulkLoadHFileRequest\032\026" +
+      ".BulkLoadHFileResponse\022F\n\013ExecService\022\032." +
+      "CoprocessorServiceRequest\032\033.CoprocessorS" +
+      "erviceResponse\022R\n\027ExecRegionServerServic" +
+      "e\022\032.CoprocessorServiceRequest\032\033.Coproces" +
+      "sorServiceResponse\022&\n\005Multi\022\r.MultiReque" +
+      "st\032\016.MultiResponseBB\n*org.apache.hadoop." +
+      "hbase.protobuf.generatedB\014ClientProtosH\001",
+      "\210\001\001\240\001\001"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
         new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -33619,7 +33742,7 @@ public final class ClientProtos {
     internal_static_RegionLoadStats_fieldAccessorTable = new
       com.google.protobuf.GeneratedMessage.FieldAccessorTable(
         internal_static_RegionLoadStats_descriptor,
-        new java.lang.String[] { "MemstoreLoad", "HeapOccupancy", });
+        new java.lang.String[] { "MemstoreLoad", "HeapOccupancy", "CompactionPressure", });
     internal_static_ResultOrException_descriptor =
       getDescriptor().getMessageTypes().get(23);
     internal_static_ResultOrException_fieldAccessorTable = new


http://git-wip-us.apache.org/repos/asf/hbase/blob/f4dbc5de/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
index 596db5a..bfe236c 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
@@ -52797,7 +52797,6 @@ public final class MasterProtos {
        *
        **
        * Turn region normalizer on or off.
-       * If synchronous is true, it waits until current balance() call, if outstanding, to return.
        * 
        */
       public abstract void setNormalizerRunning(
@@ -54239,7 +54238,6 @@ public final class MasterProtos {
      *
      **
      * Turn region normalizer on or off.
-     * If synchronous is true, it waits until current balance() call, if outstanding, to return.
      * 
      */
     public abstract void setNormalizerRunning(


http://git-wip-us.apache.org/repos/asf/hbase/blob/f4dbc5de/hbase-protocol/src/main/protobuf/Client.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/protobuf/Client.proto b/hbase-protocol/src/main/protobuf/Client.proto
index 4d6ebe9..7452cc8 100644
--- a/hbase-protocol/src/main/protobuf/Client.proto
+++ b/hbase-protocol/src/main/protobuf/Client.proto
@@ -399,6 +399,8 @@ message RegionLoadStats {
   // Percent JVM heap occupancy. Guaranteed to be positive, between 0 and 100.
   // We can move this to "ServerLoadStats" should we develop them.
   optional int32 heapOccupancy = 2 [default = 0];
+  // Compaction pressure. Guaranteed to be positive, between 0 and 100.
+  optional int32 compactionPressure = 3 [default = 0];
 }
 
 /**


http://git-wip-us.apache.org/repos/asf/hbase/blob/f4dbc5de/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 30e16de..8834846 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -6790,6 +6790,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     stats.setMemstoreLoad((int) (Math.min(100, (this.memstoreSize.get() * 100) / this
         .memstoreFlushSize)));
     stats.setHeapOccupancy((int)rsServices.getHeapMemoryManager().getHeapOccupancyPercent()*100);
+    stats.setCompactionPressure((int)rsServices.getCompactionPressure()*100 > 100 ? 100 :
+        (int)rsServices.getCompactionPressure()*100);
     return stats.build();
   }
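
----------------------------------------------------------------------
Editor's note: most of the patch above is regenerated protobuf code; the behavioral
change is small. The sketch below is not HBase source. It is a minimal, self-contained
rendering of the curve the patched ExponentialClientBackoffPolicy applies, written
to make the new compaction-pressure term easy to see. The class name, the watermark
values, and the 300-second cap passed in main() are assumptions chosen for
illustration; only the Math.max(percent, compactionPressure) step and the
Math.pow(percent, 4.0) shaping mirror the diff.

// Standalone illustration (not HBase source): approximates how the patched policy
// folds the new compactionPressure statistic into the backoff returned to clients.
public final class CompactionPressureBackoffSketch {

  private final long maxBackoffMillis;            // cap on the returned delay (assumed)
  private final float heapLowWatermark = 0.95f;   // assumed low watermark
  private final float heapHighWatermark = 0.98f;  // assumed high watermark

  public CompactionPressureBackoffSketch(long maxBackoffMillis) {
    this.maxBackoffMillis = maxBackoffMillis;
  }

  /** All three inputs arrive as 0-100 integers, as in RegionLoadStats. */
  public long getBackoffTime(int memstoreLoad, int heapOccupancy, int compactionPressure) {
    // Start from memstore load, normalized to [0, 1].
    double percent = memstoreLoad / 100.0;

    // Heap occupancy only contributes once it crosses the low watermark,
    // rescaled into [0.1, 1.0] between the two watermarks.
    float heap = heapOccupancy / 100.0f;
    if (heap >= heapLowWatermark) {
      percent = Math.max(percent,
          heap >= heapHighWatermark ? 1.0
              : scale(heap, heapLowWatermark, heapHighWatermark, 0.1, 1.0));
    }

    // The new term from this commit: compaction pressure (1.0 = heavy pressure)
    // simply competes with the other signals through max().
    percent = Math.max(percent, compactionPressure / 100.0);

    // Raising to the 4th power keeps backoff small at low load and pushes it
    // toward the cap as any single signal approaches 100%.
    double multiplier = Math.pow(percent, 4.0);
    return (long) (multiplier * maxBackoffMillis);
  }

  // Linear rescale of value from [min, max] into [rangeMin, rangeMax].
  private static double scale(double value, double min, double max,
      double rangeMin, double rangeMax) {
    return rangeMin + ((value - min) / (max - min)) * (rangeMax - rangeMin);
  }

  public static void main(String[] args) {
    CompactionPressureBackoffSketch policy = new CompactionPressureBackoffSketch(300_000L);
    System.out.println(policy.getBackoffTime(0, 0, 0));    // 0 ms: no pressure, no backoff
    System.out.println(policy.getBackoffTime(0, 0, 50));   // 18750 ms: 0.5^4 of the cap
    System.out.println(policy.getBackoffTime(0, 0, 100));  // 300000 ms: saturates at the cap
  }
}

Under those assumptions, the three calls in main() line up with the expectations in the
new testCompactionPressurePolicy(): zero pressure yields zero backoff, 50 yields a small
but non-zero delay, and 100 saturates at the configured maximum.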