hbase-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From syuanji...@apache.org
Subject [12/23] hbase git commit: HBASE-12986 Compaction pressure based client pushback
Date Thu, 05 Nov 2015 17:37:05 GMT
HBASE-12986 Compaction pressure based client pushback

Signed-off-by: Andrew Purtell <apurtell@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/68717241
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/68717241
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/68717241

Branch: refs/heads/hbase-12439
Commit: 68717241040e6dad04958e49e5373887dd677dfd
Parents: e4bf77e
Author: chenheng <chenheng@fenbi.com>
Authored: Thu Oct 29 11:54:21 2015 +0800
Committer: Andrew Purtell <apurtell@apache.org>
Committed: Tue Nov 3 18:32:08 2015 -0800

----------------------------------------------------------------------
 .../backoff/ExponentialClientBackoffPolicy.java |   5 +-
 .../hbase/client/backoff/ServerStatistics.java  |   7 +
 .../client/TestClientExponentialBackoff.java    |  34 +++-
 .../hbase/protobuf/generated/ClientProtos.java  | 187 +++++++++++++++----
 hbase-protocol/src/main/protobuf/Client.proto   |   2 +
 .../hadoop/hbase/regionserver/HRegion.java      |   2 +
 6 files changed, 200 insertions(+), 37 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/68717241/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ExponentialClientBackoffPolicy.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ExponentialClientBackoffPolicy.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ExponentialClientBackoffPolicy.java
index 5b1d3d2..b41133a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ExponentialClientBackoffPolicy.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ExponentialClientBackoffPolicy.java
@@ -70,6 +70,9 @@ public class ExponentialClientBackoffPolicy implements ClientBackoffPolicy {
 
     // Factor in heap occupancy
     float heapOccupancy = regionStats.getHeapOccupancyPercent() / 100.0f;
+
+    // Factor in compaction pressure, 1.0 means heavy compaction pressure
+    float compactionPressure = regionStats.getCompactionPressure() / 100.0f;
     if (heapOccupancy >= heapOccupancyLowWatermark) {
       // If we are higher than the high watermark, we are already applying max
       // backoff and cannot scale more (see scale() below)
@@ -80,7 +83,7 @@ public class ExponentialClientBackoffPolicy implements ClientBackoffPolicy {
           scale(heapOccupancy, heapOccupancyLowWatermark, heapOccupancyHighWatermark,
               0.1, 1.0));
     }
-
+    percent = Math.max(percent, compactionPressure);
     // square the percent as a value less than 1. Closer we move to 100 percent,
     // the percent moves to 1, but squaring causes the exponential curve
     double multiplier = Math.pow(percent, 4.0);

http://git-wip-us.apache.org/repos/asf/hbase/blob/68717241/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ServerStatistics.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ServerStatistics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ServerStatistics.java
index c7519be..2072573 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ServerStatistics.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ServerStatistics.java
@@ -57,10 +57,12 @@ public class ServerStatistics {
   public static class RegionStatistics {
     private int memstoreLoad = 0;
     private int heapOccupancy = 0;
+    private int compactionPressure = 0;
 
     public void update(ClientProtos.RegionLoadStats currentStats) {
       this.memstoreLoad = currentStats.getMemstoreLoad();
       this.heapOccupancy = currentStats.getHeapOccupancy();
+      this.compactionPressure = currentStats.getCompactionPressure();
     }
 
     public int getMemstoreLoadPercent(){
@@ -70,5 +72,10 @@ public class ServerStatistics {
     public int getHeapOccupancyPercent(){
       return this.heapOccupancy;
     }
+
+    public int getCompactionPressure() {
+      return compactionPressure;
+    }
+
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/68717241/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientExponentialBackoff.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientExponentialBackoff.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientExponentialBackoff.java
index e1a5ce6..dcd4fca 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientExponentialBackoff.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientExponentialBackoff.java
@@ -113,22 +113,46 @@ public class TestClientExponentialBackoff {
     ServerStatistics stats = new ServerStatistics();
     long backoffTime;
 
-    update(stats, 0, 95);
+    update(stats, 0, 95, 0);
     backoffTime = backoff.getBackoffTime(server, regionname, stats);
     assertTrue("Heap occupancy at low watermark had no effect", backoffTime > 0);
 
     long previous = backoffTime;
-    update(stats, 0, 96);
+    update(stats, 0, 96, 0);
     backoffTime = backoff.getBackoffTime(server, regionname, stats);
     assertTrue("Increase above low watermark should have increased backoff",
       backoffTime > previous);
 
-    update(stats, 0, 98);
+    update(stats, 0, 98, 0);
     backoffTime = backoff.getBackoffTime(server, regionname, stats);
     assertEquals("We should be using max backoff when at high watermark", backoffTime,
       ExponentialClientBackoffPolicy.DEFAULT_MAX_BACKOFF);
   }
 
+  @Test
+  public void testCompactionPressurePolicy() {
+    Configuration conf = new Configuration(false);
+    ExponentialClientBackoffPolicy backoff = new ExponentialClientBackoffPolicy(conf);
+
+    ServerStatistics stats = new ServerStatistics();
+    long backoffTime;
+
+    update(stats, 0, 0, 0);
+    backoffTime = backoff.getBackoffTime(server, regionname, stats);
+    assertTrue("Compaction pressure has no effect", backoffTime == 0);
+
+        long previous = backoffTime;
+    update(stats, 0, 0, 50);
+    backoffTime = backoff.getBackoffTime(server, regionname, stats);
+    assertTrue("Compaction pressure should be bigger",
+            backoffTime > previous);
+
+    update(stats, 0, 0, 100);
+    backoffTime = backoff.getBackoffTime(server, regionname, stats);
+    assertEquals("under heavy compaction pressure", backoffTime,
+            ExponentialClientBackoffPolicy.DEFAULT_MAX_BACKOFF);
+  }
+
   private void update(ServerStatistics stats, int load) {
    ClientProtos.RegionLoadStats stat = ClientProtos.RegionLoadStats.newBuilder()
        .setMemstoreLoad(load)
            .build();
@@ -136,10 +160,12 @@ public class TestClientExponentialBackoff {
     stats.update(regionname, stat);
   }
 
-  private void update(ServerStatistics stats, int memstoreLoad, int heapOccupancy) {
+  private void update(ServerStatistics stats, int memstoreLoad, int heapOccupancy,
+                      int compactionPressure) {
     ClientProtos.RegionLoadStats stat = ClientProtos.RegionLoadStats.newBuilder()
         .setMemstoreLoad(memstoreLoad)
         .setHeapOccupancy(heapOccupancy)
+        .setCompactionPressure(compactionPressure)
             .build();
     stats.update(regionname, stat);
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/68717241/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java
index 9c7ff54..ecc5ebd 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java
@@ -27489,6 +27489,24 @@ public final class ClientProtos {
      * </pre>
      */
     int getHeapOccupancy();
+
+    // optional int32 compactionPressure = 3 [default = 0];
+    /**
+     * <code>optional int32 compactionPressure = 3 [default = 0];</code>
+     *
+     * <pre>
+     * Compaction pressure. Guaranteed to be positive, between 0 and 100.
+     * </pre>
+     */
+    boolean hasCompactionPressure();
+    /**
+     * <code>optional int32 compactionPressure = 3 [default = 0];</code>
+     *
+     * <pre>
+     * Compaction pressure. Guaranteed to be positive, between 0 and 100.
+     * </pre>
+     */
+    int getCompactionPressure();
   }
   /**
    * Protobuf type {@code hbase.pb.RegionLoadStats}
@@ -27556,6 +27574,11 @@ public final class ClientProtos {
               heapOccupancy_ = input.readInt32();
               break;
             }
+            case 24: {
+              bitField0_ |= 0x00000004;
+              compactionPressure_ = input.readInt32();
+              break;
+            }
           }
         }
       } catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -27646,9 +27669,34 @@ public final class ClientProtos {
       return heapOccupancy_;
     }
 
+    // optional int32 compactionPressure = 3 [default = 0];
+    public static final int COMPACTIONPRESSURE_FIELD_NUMBER = 3;
+    private int compactionPressure_;
+    /**
+     * <code>optional int32 compactionPressure = 3 [default = 0];</code>
+     *
+     * <pre>
+     * Compaction pressure. Guaranteed to be positive, between 0 and 100.
+     * </pre>
+     */
+    public boolean hasCompactionPressure() {
+      return ((bitField0_ & 0x00000004) == 0x00000004);
+    }
+    /**
+     * <code>optional int32 compactionPressure = 3 [default = 0];</code>
+     *
+     * <pre>
+     * Compaction pressure. Guaranteed to be positive, between 0 and 100.
+     * </pre>
+     */
+    public int getCompactionPressure() {
+      return compactionPressure_;
+    }
+
     private void initFields() {
       memstoreLoad_ = 0;
       heapOccupancy_ = 0;
+      compactionPressure_ = 0;
     }
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
@@ -27668,6 +27716,9 @@ public final class ClientProtos {
       if (((bitField0_ & 0x00000002) == 0x00000002)) {
         output.writeInt32(2, heapOccupancy_);
       }
+      if (((bitField0_ & 0x00000004) == 0x00000004)) {
+        output.writeInt32(3, compactionPressure_);
+      }
       getUnknownFields().writeTo(output);
     }
 
@@ -27685,6 +27736,10 @@ public final class ClientProtos {
         size += com.google.protobuf.CodedOutputStream
           .computeInt32Size(2, heapOccupancy_);
       }
+      if (((bitField0_ & 0x00000004) == 0x00000004)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeInt32Size(3, compactionPressure_);
+      }
       size += getUnknownFields().getSerializedSize();
       memoizedSerializedSize = size;
       return size;
@@ -27718,6 +27773,11 @@ public final class ClientProtos {
         result = result && (getHeapOccupancy()
             == other.getHeapOccupancy());
       }
+      result = result && (hasCompactionPressure() == other.hasCompactionPressure());
+      if (hasCompactionPressure()) {
+        result = result && (getCompactionPressure()
+            == other.getCompactionPressure());
+      }
       result = result &&
           getUnknownFields().equals(other.getUnknownFields());
       return result;
@@ -27739,6 +27799,10 @@ public final class ClientProtos {
         hash = (37 * hash) + HEAPOCCUPANCY_FIELD_NUMBER;
         hash = (53 * hash) + getHeapOccupancy();
       }
+      if (hasCompactionPressure()) {
+        hash = (37 * hash) + COMPACTIONPRESSURE_FIELD_NUMBER;
+        hash = (53 * hash) + getCompactionPressure();
+      }
       hash = (29 * hash) + getUnknownFields().hashCode();
       memoizedHashCode = hash;
       return hash;
@@ -27857,6 +27921,8 @@ public final class ClientProtos {
         bitField0_ = (bitField0_ & ~0x00000001);
         heapOccupancy_ = 0;
         bitField0_ = (bitField0_ & ~0x00000002);
+        compactionPressure_ = 0;
+        bitField0_ = (bitField0_ & ~0x00000004);
         return this;
       }
 
@@ -27893,6 +27959,10 @@ public final class ClientProtos {
           to_bitField0_ |= 0x00000002;
         }
         result.heapOccupancy_ = heapOccupancy_;
+        if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+          to_bitField0_ |= 0x00000004;
+        }
+        result.compactionPressure_ = compactionPressure_;
         result.bitField0_ = to_bitField0_;
         onBuilt();
         return result;
@@ -27915,6 +27985,9 @@ public final class ClientProtos {
         if (other.hasHeapOccupancy()) {
           setHeapOccupancy(other.getHeapOccupancy());
         }
+        if (other.hasCompactionPressure()) {
+          setCompactionPressure(other.getCompactionPressure());
+        }
         this.mergeUnknownFields(other.getUnknownFields());
         return this;
       }
@@ -28044,6 +28117,55 @@ public final class ClientProtos {
         return this;
       }
 
+      // optional int32 compactionPressure = 3 [default = 0];
+      private int compactionPressure_ ;
+      /**
+       * <code>optional int32 compactionPressure = 3 [default = 0];</code>
+       *
+       * <pre>
+       * Compaction pressure. Guaranteed to be positive, between 0 and 100.
+       * </pre>
+       */
+      public boolean hasCompactionPressure() {
+        return ((bitField0_ & 0x00000004) == 0x00000004);
+      }
+      /**
+       * <code>optional int32 compactionPressure = 3 [default = 0];</code>
+       *
+       * <pre>
+       * Compaction pressure. Guaranteed to be positive, between 0 and 100.
+       * </pre>
+       */
+      public int getCompactionPressure() {
+        return compactionPressure_;
+      }
+      /**
+       * <code>optional int32 compactionPressure = 3 [default = 0];</code>
+       *
+       * <pre>
+       * Compaction pressure. Guaranteed to be positive, between 0 and 100.
+       * </pre>
+       */
+      public Builder setCompactionPressure(int value) {
+        bitField0_ |= 0x00000004;
+        compactionPressure_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional int32 compactionPressure = 3 [default = 0];</code>
+       *
+       * <pre>
+       * Compaction pressure. Guaranteed to be positive, between 0 and 100.
+       * </pre>
+       */
+      public Builder clearCompactionPressure() {
+        bitField0_ = (bitField0_ & ~0x00000004);
+        compactionPressure_ = 0;
+        onChanged();
+        return this;
+      }
+
       // @@protoc_insertion_point(builder_scope:hbase.pb.RegionLoadStats)
     }
 
@@ -33308,38 +33430,39 @@ public final class ClientProtos {
       "oprocessorServiceCall\"k\n\014RegionAction\022)\n",
       "\006region\030\001 \002(\0132\031.hbase.pb.RegionSpecifier" +
      "\022\016\n\006atomic\030\002 \001(\010\022 \n\006action\030\003 \003(\0132\020.hbase" +
-      ".pb.Action\"D\n\017RegionLoadStats\022\027\n\014memstor" +
+      ".pb.Action\"c\n\017RegionLoadStats\022\027\n\014memstor" +
       "eLoad\030\001 \001(\005:\0010\022\030\n\rheapOccupancy\030\002 \001(\005:\0010" +
-      "\"\332\001\n\021ResultOrException\022\r\n\005index\030\001 \001(\r\022 \n" +
-      "\006result\030\002 \001(\0132\020.hbase.pb.Result\022*\n\texcep" +
-      "tion\030\003 \001(\0132\027.hbase.pb.NameBytesPair\022:\n\016s" +
-      "ervice_result\030\004 \001(\0132\".hbase.pb.Coprocess" +
-      "orServiceResult\022,\n\tloadStats\030\005 \001(\0132\031.hba" +
-      "se.pb.RegionLoadStats\"x\n\022RegionActionRes",
-      "ult\0226\n\021resultOrException\030\001 \003(\0132\033.hbase.p" +
-      "b.ResultOrException\022*\n\texception\030\002 \001(\0132\027" +
-      ".hbase.pb.NameBytesPair\"x\n\014MultiRequest\022" +
-      ",\n\014regionAction\030\001 \003(\0132\026.hbase.pb.RegionA" +
-      "ction\022\022\n\nnonceGroup\030\002 \001(\004\022&\n\tcondition\030\003" +
-      " \001(\0132\023.hbase.pb.Condition\"\\\n\rMultiRespon" +
-      "se\0228\n\022regionActionResult\030\001 \003(\0132\034.hbase.p" +
-      "b.RegionActionResult\022\021\n\tprocessed\030\002 \001(\010*" +
-      "\'\n\013Consistency\022\n\n\006STRONG\020\000\022\014\n\010TIMELINE\020\001" +
-      "2\203\004\n\rClientService\0222\n\003Get\022\024.hbase.pb.Get",
-      "Request\032\025.hbase.pb.GetResponse\022;\n\006Mutate" +
-      "\022\027.hbase.pb.MutateRequest\032\030.hbase.pb.Mut" +
-      "ateResponse\0225\n\004Scan\022\025.hbase.pb.ScanReque" +
-      "st\032\026.hbase.pb.ScanResponse\022P\n\rBulkLoadHF" +
-      "ile\022\036.hbase.pb.BulkLoadHFileRequest\032\037.hb" +
-      "ase.pb.BulkLoadHFileResponse\022X\n\013ExecServ" +
-      "ice\022#.hbase.pb.CoprocessorServiceRequest" +
-      "\032$.hbase.pb.CoprocessorServiceResponse\022d" +
-      "\n\027ExecRegionServerService\022#.hbase.pb.Cop" +
-      "rocessorServiceRequest\032$.hbase.pb.Coproc",
-      "essorServiceResponse\0228\n\005Multi\022\026.hbase.pb" +
-      ".MultiRequest\032\027.hbase.pb.MultiResponseBB" +
-      "\n*org.apache.hadoop.hbase.protobuf.gener" +
-      "atedB\014ClientProtosH\001\210\001\001\240\001\001"
+      "\022\035\n\022compactionPressure\030\003 \001(\005:\0010\"\332\001\n\021Resu" +
+      "ltOrException\022\r\n\005index\030\001 \001(\r\022 \n\006result\030\002" +
+      " \001(\0132\020.hbase.pb.Result\022*\n\texception\030\003 \001(" +
+      "\0132\027.hbase.pb.NameBytesPair\022:\n\016service_re" +
+      "sult\030\004 \001(\0132\".hbase.pb.CoprocessorService" +
+      "Result\022,\n\tloadStats\030\005 \001(\0132\031.hbase.pb.Reg",
+      "ionLoadStats\"x\n\022RegionActionResult\0226\n\021re" +
+      "sultOrException\030\001 \003(\0132\033.hbase.pb.ResultO" +
+      "rException\022*\n\texception\030\002 \001(\0132\027.hbase.pb" +
+      ".NameBytesPair\"x\n\014MultiRequest\022,\n\014region" +
+      "Action\030\001 \003(\0132\026.hbase.pb.RegionAction\022\022\n\n" +
+      "nonceGroup\030\002 \001(\004\022&\n\tcondition\030\003 \001(\0132\023.hb" +
+      "ase.pb.Condition\"\\\n\rMultiResponse\0228\n\022reg" +
+      "ionActionResult\030\001 \003(\0132\034.hbase.pb.RegionA" +
+      "ctionResult\022\021\n\tprocessed\030\002 \001(\010*\'\n\013Consis" +
+      "tency\022\n\n\006STRONG\020\000\022\014\n\010TIMELINE\020\0012\203\004\n\rClie",
+      "ntService\0222\n\003Get\022\024.hbase.pb.GetRequest\032\025" +
+      ".hbase.pb.GetResponse\022;\n\006Mutate\022\027.hbase." +
+      "pb.MutateRequest\032\030.hbase.pb.MutateRespon" +
+      "se\0225\n\004Scan\022\025.hbase.pb.ScanRequest\032\026.hbas" +
+      "e.pb.ScanResponse\022P\n\rBulkLoadHFile\022\036.hba" +
+      "se.pb.BulkLoadHFileRequest\032\037.hbase.pb.Bu" +
+      "lkLoadHFileResponse\022X\n\013ExecService\022#.hba" +
+      "se.pb.CoprocessorServiceRequest\032$.hbase." +
+      "pb.CoprocessorServiceResponse\022d\n\027ExecReg" +
+      "ionServerService\022#.hbase.pb.CoprocessorS",
+      "erviceRequest\032$.hbase.pb.CoprocessorServ" +
+      "iceResponse\0228\n\005Multi\022\026.hbase.pb.MultiReq" +
+      "uest\032\027.hbase.pb.MultiResponseBB\n*org.apa" +
+      "che.hadoop.hbase.protobuf.generatedB\014Cli" +
+      "entProtosH\001\210\001\001\240\001\001"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -33501,7 +33624,7 @@ public final class ClientProtos {
           internal_static_hbase_pb_RegionLoadStats_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_RegionLoadStats_descriptor,
-              new java.lang.String[] { "MemstoreLoad", "HeapOccupancy", });
+              new java.lang.String[] { "MemstoreLoad", "HeapOccupancy", "CompactionPressure", });
           internal_static_hbase_pb_ResultOrException_descriptor =
             getDescriptor().getMessageTypes().get(23);
           internal_static_hbase_pb_ResultOrException_fieldAccessorTable = new

http://git-wip-us.apache.org/repos/asf/hbase/blob/68717241/hbase-protocol/src/main/protobuf/Client.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/protobuf/Client.proto b/hbase-protocol/src/main/protobuf/Client.proto
index e33f9f2..0119918 100644
--- a/hbase-protocol/src/main/protobuf/Client.proto
+++ b/hbase-protocol/src/main/protobuf/Client.proto
@@ -395,6 +395,8 @@ message RegionLoadStats {
   // Percent JVM heap occupancy. Guaranteed to be positive, between 0 and 100.
   // We can move this to "ServerLoadStats" should we develop them.
   optional int32 heapOccupancy = 2 [default = 0];
+  // Compaction pressure. Guaranteed to be positive, between 0 and 100.
+  optional int32 compactionPressure = 3 [default = 0];
 }
 
 /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/68717241/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index e987bc6..459d56e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -6706,6 +6706,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Region {
     stats.setMemstoreLoad((int) (Math.min(100, (this.memstoreSize.get() * 100) / this
         .memstoreFlushSize)));
     stats.setHeapOccupancy((int)rsServices.getHeapMemoryManager().getHeapOccupancyPercent()*100);
+    stats.setCompactionPressure((int)rsServices.getCompactionPressure()*100 > 100 ? 100 :
+                (int)rsServices.getCompactionPressure()*100);
     return stats.build();
   }
 


Mime
View raw message