hbase-commits mailing list archives

From apurt...@apache.org
Subject [4/4] hbase git commit: HBASE-12986 Compaction pressure based client pushback
Date Wed, 04 Nov 2015 03:02:52 GMT
HBASE-12986 Compaction pressure based client pushback

Signed-off-by: Andrew Purtell <apurtell@apache.org>

Conflicts:
	hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/bf7c0bc0
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/bf7c0bc0
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/bf7c0bc0

Branch: refs/heads/0.98
Commit: bf7c0bc09199f8498f7d2a8fb873028155682b9a
Parents: 076ce53
Author: chenheng <chenheng@fenbi.com>
Authored: Thu Oct 29 11:54:21 2015 +0800
Committer: Andrew Purtell <apurtell@apache.org>
Committed: Tue Nov 3 18:46:56 2015 -0800

----------------------------------------------------------------------
 .../backoff/ExponentialClientBackoffPolicy.java |   5 +-
 .../hbase/client/backoff/ServerStatistics.java  |   7 +
 .../client/TestClientExponentialBackoff.java    |  34 +++-
 .../hbase/protobuf/generated/ClientProtos.java  | 175 ++++++++++++++++---
 hbase-protocol/src/main/protobuf/Client.proto   |   2 +
 .../hadoop/hbase/regionserver/HRegion.java      |   2 +
 6 files changed, 194 insertions(+), 31 deletions(-)
----------------------------------------------------------------------
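
In short: the region server now reports a compactionPressure value between 0 and 100
in RegionLoadStats, next to memstoreLoad and heapOccupancy, and
ExponentialClientBackoffPolicy takes the maximum of that pressure and its existing
heap-occupancy scaling before applying the exponential curve. A worked example read
off the hunks below: a reported compactionPressure of 50 caps the load fraction at
0.5, and 0.5^4 = 0.0625, so the client waits roughly 6% of the maximum backoff; at
100 the multiplier reaches 1.0 and the client sits at maximum backoff, which is
exactly what the new testCompactionPressurePolicy asserts.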


http://git-wip-us.apache.org/repos/asf/hbase/blob/bf7c0bc0/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ExponentialClientBackoffPolicy.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ExponentialClientBackoffPolicy.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ExponentialClientBackoffPolicy.java
index 5b1d3d2..b41133a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ExponentialClientBackoffPolicy.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ExponentialClientBackoffPolicy.java
@@ -70,6 +70,9 @@ public class ExponentialClientBackoffPolicy implements ClientBackoffPolicy {
 
     // Factor in heap occupancy
     float heapOccupancy = regionStats.getHeapOccupancyPercent() / 100.0f;
+
+    // Factor in compaction pressure, 1.0 means heavy compaction pressure
+    float compactionPressure = regionStats.getCompactionPressure() / 100.0f;
     if (heapOccupancy >= heapOccupancyLowWatermark) {
       // If we are higher than the high watermark, we are already applying max
       // backoff and cannot scale more (see scale() below)
@@ -80,7 +83,7 @@ public class ExponentialClientBackoffPolicy implements ClientBackoffPolicy {
           scale(heapOccupancy, heapOccupancyLowWatermark, heapOccupancyHighWatermark,
               0.1, 1.0));
     }
-
+    percent = Math.max(percent, compactionPressure);
     // square the percent as a value less than 1. Closer we move to 100 percent,
     // the percent moves to 1, but squaring causes the exponential curve
     double multiplier = Math.pow(percent, 4.0);
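
For readers skimming the hunk above, here is a self-contained sketch of the pushback
math it produces. The class name, method names and watermark constants are
illustrative rather than the HBase API: the 0.95/0.98 watermarks are assumed from the
95/96/98 values exercised in TestClientExponentialBackoff further down, and the
memstore-load starting fraction comes from the pre-existing policy code rather than
this hunk. Only the combination itself, the maximum of memstore load, scaled heap
occupancy and compaction pressure raised to the fourth power, is taken from the patch.

    public final class PushbackMathSketch {
      private static final double HEAP_LOW_WATERMARK = 0.95;   // assumed default
      private static final double HEAP_HIGH_WATERMARK = 0.98;  // assumed default

      // Linearly map value from [min, max] onto [rangeMin, rangeMax].
      private static double scale(double value, double min, double max,
          double rangeMin, double rangeMax) {
        double clamped = Math.min(Math.max(value, min), max);
        return rangeMin + (clamped - min) / (max - min) * (rangeMax - rangeMin);
      }

      // All inputs are the 0-100 integers carried by RegionLoadStats.
      static long backoffMs(int memstoreLoad, int heapOccupancyPct,
          int compactionPressurePct, long maxBackoffMs) {
        double percent = memstoreLoad / 100.0;
        double heapOccupancy = heapOccupancyPct / 100.0;
        double compactionPressure = compactionPressurePct / 100.0;
        if (heapOccupancy >= HEAP_LOW_WATERMARK) {
          percent = Math.max(percent,
              scale(heapOccupancy, HEAP_LOW_WATERMARK, HEAP_HIGH_WATERMARK, 0.1, 1.0));
        }
        // The line this patch adds: compaction pressure can only push the load
        // fraction up, never down.
        percent = Math.max(percent, compactionPressure);
        // The fourth power keeps the wait negligible at low load and ramps it
        // steeply toward the maximum as load approaches 100%.
        double multiplier = Math.pow(percent, 4.0);
        return (long) (multiplier * maxBackoffMs);
      }

      public static void main(String[] args) {
        System.out.println(backoffMs(0, 0, 50, 5000));   // ~312 ms against a 5 s cap
        System.out.println(backoffMs(0, 0, 100, 5000));  // 5000 ms, i.e. max backoff
      }
    }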

http://git-wip-us.apache.org/repos/asf/hbase/blob/bf7c0bc0/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ServerStatistics.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ServerStatistics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ServerStatistics.java
index c7519be..2072573 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ServerStatistics.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ServerStatistics.java
@@ -57,10 +57,12 @@ public class ServerStatistics {
   public static class RegionStatistics {
     private int memstoreLoad = 0;
     private int heapOccupancy = 0;
+    private int compactionPressure = 0;
 
     public void update(ClientProtos.RegionLoadStats currentStats) {
       this.memstoreLoad = currentStats.getMemstoreLoad();
       this.heapOccupancy = currentStats.getHeapOccupancy();
+      this.compactionPressure = currentStats.getCompactionPressure();
     }
 
     public int getMemstoreLoadPercent(){
@@ -70,5 +72,10 @@ public class ServerStatistics {
     public int getHeapOccupancyPercent(){
       return this.heapOccupancy;
     }
+
+    public int getCompactionPressure() {
+      return compactionPressure;
+    }
+
   }
 }
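
A hedged end-to-end sketch of the client-side plumbing added in the two files above.
The RegionLoadStats builder calls, ServerStatistics.update() and the
ExponentialClientBackoffPolicy(Configuration) constructor all appear verbatim in the
diffs in this commit; the getBackoffTime(ServerName, byte[], ServerStatistics)
signature, ServerName.valueOf() and Bytes.toBytes() are assumptions about the
surrounding 0.98 client API.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.backoff.ExponentialClientBackoffPolicy;
    import org.apache.hadoop.hbase.client.backoff.ServerStatistics;
    import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CompactionPushbackUsageSketch {
      public static void main(String[] args) {
        // Stats as a region server would report them: moderate compaction pressure.
        ClientProtos.RegionLoadStats stat = ClientProtos.RegionLoadStats.newBuilder()
            .setMemstoreLoad(0)
            .setHeapOccupancy(0)
            .setCompactionPressure(75)
            .build();

        // Cache the per-region stats on the client side, as the client's stats
        // tracker would after each multi response.
        ServerStatistics stats = new ServerStatistics();
        byte[] region = Bytes.toBytes("test-region");
        stats.update(region, stat);

        // Ask the backoff policy how long to pause before the next write to this region.
        ExponentialClientBackoffPolicy backoff =
            new ExponentialClientBackoffPolicy(new Configuration(false));
        ServerName server = ServerName.valueOf("rs-host", 60020, 1L);
        long pauseMs = backoff.getBackoffTime(server, region, stats);
        System.out.println("client pushback: " + pauseMs + " ms");
      }
    }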

http://git-wip-us.apache.org/repos/asf/hbase/blob/bf7c0bc0/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientExponentialBackoff.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientExponentialBackoff.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientExponentialBackoff.java
index 4266eb8..4e37a46 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientExponentialBackoff.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientExponentialBackoff.java
@@ -112,22 +112,46 @@ public class TestClientExponentialBackoff {
     ServerStatistics stats = new ServerStatistics();
     long backoffTime;
 
-    update(stats, 0, 95);
+    update(stats, 0, 95, 0);
     backoffTime = backoff.getBackoffTime(server, regionname, stats);
     assertTrue("Heap occupancy at low watermark had no effect", backoffTime > 0);
 
     long previous = backoffTime;
-    update(stats, 0, 96);
+    update(stats, 0, 96, 0);
     backoffTime = backoff.getBackoffTime(server, regionname, stats);
     assertTrue("Increase above low watermark should have increased backoff",
       backoffTime > previous);
 
-    update(stats, 0, 98);
+    update(stats, 0, 98, 0);
     backoffTime = backoff.getBackoffTime(server, regionname, stats);
     assertEquals("We should be using max backoff when at high watermark", backoffTime,
       ExponentialClientBackoffPolicy.DEFAULT_MAX_BACKOFF);
   }
 
+  @Test
+  public void testCompactionPressurePolicy() {
+    Configuration conf = new Configuration(false);
+    ExponentialClientBackoffPolicy backoff = new ExponentialClientBackoffPolicy(conf);
+
+    ServerStatistics stats = new ServerStatistics();
+    long backoffTime;
+
+    update(stats, 0, 0, 0);
+    backoffTime = backoff.getBackoffTime(server, regionname, stats);
+    assertTrue("Compaction pressure has no effect", backoffTime == 0);
+
+    long previous = backoffTime;
+    update(stats, 0, 0, 50);
+    backoffTime = backoff.getBackoffTime(server, regionname, stats);
+    assertTrue("Increase in compaction pressure should have increased backoff",
+      backoffTime > previous);
+
+    update(stats, 0, 0, 100);
+    backoffTime = backoff.getBackoffTime(server, regionname, stats);
+    assertEquals("under heavy compaction pressure", backoffTime,
+            ExponentialClientBackoffPolicy.DEFAULT_MAX_BACKOFF);
+  }
+
   private void update(ServerStatistics stats, int load) {
     ClientProtos.RegionLoadStats stat = ClientProtos.RegionLoadStats.newBuilder()
         .setMemstoreLoad
@@ -135,10 +159,12 @@ public class TestClientExponentialBackoff {
     stats.update(regionname, stat);
   }
 
-  private void update(ServerStatistics stats, int memstoreLoad, int heapOccupancy) {
+  private void update(ServerStatistics stats, int memstoreLoad, int heapOccupancy,
+                      int compactionPressure) {
     ClientProtos.RegionLoadStats stat = ClientProtos.RegionLoadStats.newBuilder()
         .setMemstoreLoad(memstoreLoad)
         .setHeapOccupancy(heapOccupancy)
+        .setCompactionPressure(compactionPressure)
             .build();
     stats.update(regionname, stat);
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/bf7c0bc0/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java
index 2223bd4..14ddb58 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java
@@ -25891,6 +25891,24 @@ public final class ClientProtos {
      * </pre>
      */
     int getHeapOccupancy();
+
+    // optional int32 compactionPressure = 3 [default = 0];
+    /**
+     * <code>optional int32 compactionPressure = 3 [default = 0];</code>
+     *
+     * <pre>
+     * Compaction pressure. Guaranteed to be positive, between 0 and 100.
+     * </pre>
+     */
+    boolean hasCompactionPressure();
+    /**
+     * <code>optional int32 compactionPressure = 3 [default = 0];</code>
+     *
+     * <pre>
+     * Compaction pressure. Guaranteed to be positive, between 0 and 100.
+     * </pre>
+     */
+    int getCompactionPressure();
   }
   /**
    * Protobuf type {@code RegionLoadStats}
@@ -25958,6 +25976,11 @@ public final class ClientProtos {
               heapOccupancy_ = input.readInt32();
               break;
             }
+            case 24: {
+              bitField0_ |= 0x00000004;
+              compactionPressure_ = input.readInt32();
+              break;
+            }
           }
         }
       } catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -26048,9 +26071,34 @@ public final class ClientProtos {
       return heapOccupancy_;
     }
 
+    // optional int32 compactionPressure = 3 [default = 0];
+    public static final int COMPACTIONPRESSURE_FIELD_NUMBER = 3;
+    private int compactionPressure_;
+    /**
+     * <code>optional int32 compactionPressure = 3 [default = 0];</code>
+     *
+     * <pre>
+     * Compaction pressure. Guaranteed to be positive, between 0 and 100.
+     * </pre>
+     */
+    public boolean hasCompactionPressure() {
+      return ((bitField0_ & 0x00000004) == 0x00000004);
+    }
+    /**
+     * <code>optional int32 compactionPressure = 3 [default = 0];</code>
+     *
+     * <pre>
+     * Compaction pressure. Guaranteed to be positive, between 0 and 100.
+     * </pre>
+     */
+    public int getCompactionPressure() {
+      return compactionPressure_;
+    }
+
     private void initFields() {
       memstoreLoad_ = 0;
       heapOccupancy_ = 0;
+      compactionPressure_ = 0;
     }
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
@@ -26070,6 +26118,9 @@ public final class ClientProtos {
       if (((bitField0_ & 0x00000002) == 0x00000002)) {
         output.writeInt32(2, heapOccupancy_);
       }
+      if (((bitField0_ & 0x00000004) == 0x00000004)) {
+        output.writeInt32(3, compactionPressure_);
+      }
       getUnknownFields().writeTo(output);
     }
 
@@ -26087,6 +26138,10 @@ public final class ClientProtos {
         size += com.google.protobuf.CodedOutputStream
           .computeInt32Size(2, heapOccupancy_);
       }
+      if (((bitField0_ & 0x00000004) == 0x00000004)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeInt32Size(3, compactionPressure_);
+      }
       size += getUnknownFields().getSerializedSize();
       memoizedSerializedSize = size;
       return size;
@@ -26120,6 +26175,11 @@ public final class ClientProtos {
         result = result && (getHeapOccupancy()
             == other.getHeapOccupancy());
       }
+      result = result && (hasCompactionPressure() == other.hasCompactionPressure());
+      if (hasCompactionPressure()) {
+        result = result && (getCompactionPressure()
+            == other.getCompactionPressure());
+      }
       result = result &&
           getUnknownFields().equals(other.getUnknownFields());
       return result;
@@ -26141,6 +26201,10 @@ public final class ClientProtos {
         hash = (37 * hash) + HEAPOCCUPANCY_FIELD_NUMBER;
         hash = (53 * hash) + getHeapOccupancy();
       }
+      if (hasCompactionPressure()) {
+        hash = (37 * hash) + COMPACTIONPRESSURE_FIELD_NUMBER;
+        hash = (53 * hash) + getCompactionPressure();
+      }
       hash = (29 * hash) + getUnknownFields().hashCode();
       memoizedHashCode = hash;
       return hash;
@@ -26259,6 +26323,8 @@ public final class ClientProtos {
         bitField0_ = (bitField0_ & ~0x00000001);
         heapOccupancy_ = 0;
         bitField0_ = (bitField0_ & ~0x00000002);
+        compactionPressure_ = 0;
+        bitField0_ = (bitField0_ & ~0x00000004);
         return this;
       }
 
@@ -26295,6 +26361,10 @@ public final class ClientProtos {
           to_bitField0_ |= 0x00000002;
         }
         result.heapOccupancy_ = heapOccupancy_;
+        if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+          to_bitField0_ |= 0x00000004;
+        }
+        result.compactionPressure_ = compactionPressure_;
         result.bitField0_ = to_bitField0_;
         onBuilt();
         return result;
@@ -26317,6 +26387,9 @@ public final class ClientProtos {
         if (other.hasHeapOccupancy()) {
           setHeapOccupancy(other.getHeapOccupancy());
         }
+        if (other.hasCompactionPressure()) {
+          setCompactionPressure(other.getCompactionPressure());
+        }
         this.mergeUnknownFields(other.getUnknownFields());
         return this;
       }
@@ -26446,6 +26519,55 @@ public final class ClientProtos {
         return this;
       }
 
+      // optional int32 compactionPressure = 3 [default = 0];
+      private int compactionPressure_ ;
+      /**
+       * <code>optional int32 compactionPressure = 3 [default = 0];</code>
+       *
+       * <pre>
+       * Compaction pressure. Guaranteed to be positive, between 0 and 100.
+       * </pre>
+       */
+      public boolean hasCompactionPressure() {
+        return ((bitField0_ & 0x00000004) == 0x00000004);
+      }
+      /**
+       * <code>optional int32 compactionPressure = 3 [default = 0];</code>
+       *
+       * <pre>
+       * Compaction pressure. Guaranteed to be positive, between 0 and 100.
+       * </pre>
+       */
+      public int getCompactionPressure() {
+        return compactionPressure_;
+      }
+      /**
+       * <code>optional int32 compactionPressure = 3 [default = 0];</code>
+       *
+       * <pre>
+       * Compaction pressure. Guaranteed to be positive, between 0 and 100.
+       * </pre>
+       */
+      public Builder setCompactionPressure(int value) {
+        bitField0_ |= 0x00000004;
+        compactionPressure_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional int32 compactionPressure = 3 [default = 0];</code>
+       *
+       * <pre>
+       * Compaction pressure. Guaranteed to be positive, between 0 and 100.
+       * </pre>
+       */
+      public Builder clearCompactionPressure() {
+        bitField0_ = (bitField0_ & ~0x00000004);
+        compactionPressure_ = 0;
+        onChanged();
+        return this;
+      }
+
       // @@protoc_insertion_point(builder_scope:RegionLoadStats)
     }
 
@@ -31691,32 +31813,33 @@ public final class ClientProtos {
       "t\022-\n\014service_call\030\004 \001(\0132\027.CoprocessorSer" +
       "viceCall\"Y\n\014RegionAction\022 \n\006region\030\001 \002(\013" +
       "2\020.RegionSpecifier\022\016\n\006atomic\030\002 \001(\010\022\027\n\006ac" +
-      "tion\030\003 \003(\0132\007.Action\"D\n\017RegionLoadStats\022\027" +
+      "tion\030\003 \003(\0132\007.Action\"c\n\017RegionLoadStats\022\027" +
       "\n\014memstoreLoad\030\001 \001(\005:\0010\022\030\n\rheapOccupancy" +
-      "\030\002 \001(\005:\0010\"\266\001\n\021ResultOrException\022\r\n\005index" +
-      "\030\001 \001(\r\022\027\n\006result\030\002 \001(\0132\007.Result\022!\n\texcep"
+
-      "tion\030\003 \001(\0132\016.NameBytesPair\0221\n\016service_re" +
-      "sult\030\004 \001(\0132\031.CoprocessorServiceResult\022#\n" +
-      "\tloadStats\030\005 \001(\0132\020.RegionLoadStats\"f\n\022Re",
-      "gionActionResult\022-\n\021resultOrException\030\001 " +
-      "\003(\0132\022.ResultOrException\022!\n\texception\030\002 \001" +
-      "(\0132\016.NameBytesPair\"f\n\014MultiRequest\022#\n\014re" +
-      "gionAction\030\001 \003(\0132\r.RegionAction\022\022\n\nnonce" +
-      "Group\030\002 \001(\004\022\035\n\tcondition\030\003 \001(\0132\n.Conditi" +
-      "on\"S\n\rMultiResponse\022/\n\022regionActionResul" +
-      "t\030\001 \003(\0132\023.RegionActionResult\022\021\n\tprocesse" +
-      "d\030\002 \001(\0102\205\003\n\rClientService\022 \n\003Get\022\013.GetRe" +
-      "quest\032\014.GetResponse\022)\n\006Mutate\022\016.MutateRe" +
-      "quest\032\017.MutateResponse\022#\n\004Scan\022\014.ScanReq",
-      "uest\032\r.ScanResponse\022>\n\rBulkLoadHFile\022\025.B" +
-      "ulkLoadHFileRequest\032\026.BulkLoadHFileRespo" +
-      "nse\022F\n\013ExecService\022\032.CoprocessorServiceR" +
-      "equest\032\033.CoprocessorServiceResponse\022R\n\027E" +
-      "xecRegionServerService\022\032.CoprocessorServ" +
-      "iceRequest\032\033.CoprocessorServiceResponse\022" +
-      "&\n\005Multi\022\r.MultiRequest\032\016.MultiResponseB" +
-      "B\n*org.apache.hadoop.hbase.protobuf.gene" +
-      "ratedB\014ClientProtosH\001\210\001\001\240\001\001"
+      "\030\002 \001(\005:\0010\022\035\n\022compactionPressure\030\003 \001(\005:\0010"
+
+      "\"\266\001\n\021ResultOrException\022\r\n\005index\030\001 \001(\r\022\027\n" +
+      "\006result\030\002 \001(\0132\007.Result\022!\n\texception\030\003 \001(" +
+      "\0132\016.NameBytesPair\0221\n\016service_result\030\004 \001(" +
+      "\0132\031.CoprocessorServiceResult\022#\n\tloadStat",
+      "s\030\005 \001(\0132\020.RegionLoadStats\"f\n\022RegionActio" +
+      "nResult\022-\n\021resultOrException\030\001 \003(\0132\022.Res" +
+      "ultOrException\022!\n\texception\030\002 \001(\0132\016.Name" +
+      "BytesPair\"f\n\014MultiRequest\022#\n\014regionActio" +
+      "n\030\001 \003(\0132\r.RegionAction\022\022\n\nnonceGroup\030\002 \001" +
+      "(\004\022\035\n\tcondition\030\003 \001(\0132\n.Condition\"S\n\rMul" +
+      "tiResponse\022/\n\022regionActionResult\030\001 \003(\0132\023" +
+      ".RegionActionResult\022\021\n\tprocessed\030\002 \001(\0102\205" +
+      "\003\n\rClientService\022 \n\003Get\022\013.GetRequest\032\014.G" +
+      "etResponse\022)\n\006Mutate\022\016.MutateRequest\032\017.M",
+      "utateResponse\022#\n\004Scan\022\014.ScanRequest\032\r.Sc" +
+      "anResponse\022>\n\rBulkLoadHFile\022\025.BulkLoadHF" +
+      "ileRequest\032\026.BulkLoadHFileResponse\022F\n\013Ex" +
+      "ecService\022\032.CoprocessorServiceRequest\032\033." +
+      "CoprocessorServiceResponse\022R\n\027ExecRegion" +
+      "ServerService\022\032.CoprocessorServiceReques" +
+      "t\032\033.CoprocessorServiceResponse\022&\n\005Multi\022" +
+      "\r.MultiRequest\032\016.MultiResponseBB\n*org.ap" +
+      "ache.hadoop.hbase.protobuf.generatedB\014Cl" +
+      "ientProtosH\001\210\001\001\240\001\001"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -31878,7 +32001,7 @@ public final class ClientProtos {
           internal_static_RegionLoadStats_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_RegionLoadStats_descriptor,
-              new java.lang.String[] { "MemstoreLoad", "HeapOccupancy", });
+              new java.lang.String[] { "MemstoreLoad", "HeapOccupancy", "CompactionPressure", });
           internal_static_ResultOrException_descriptor =
             getDescriptor().getMessageTypes().get(23);
           internal_static_ResultOrException_fieldAccessorTable = new

http://git-wip-us.apache.org/repos/asf/hbase/blob/bf7c0bc0/hbase-protocol/src/main/protobuf/Client.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/protobuf/Client.proto b/hbase-protocol/src/main/protobuf/Client.proto
index 48b0886..b93b0ac 100644
--- a/hbase-protocol/src/main/protobuf/Client.proto
+++ b/hbase-protocol/src/main/protobuf/Client.proto
@@ -352,6 +352,8 @@ message RegionLoadStats {
   // Percent JVM heap occupancy. Guaranteed to be positive, between 0 and 100.
   // We can move this to "ServerLoadStats" should we develop them.
   optional int32 heapOccupancy = 2 [default = 0];
+  // Compaction pressure. Guaranteed to be positive, between 0 and 100.
+  optional int32 compactionPressure = 3 [default = 0];
 }
 
 /**
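
Because the new field is a proto2 optional with a default of 0, mixed-version
clusters degrade gracefully: a RegionLoadStats message from an older server that
never sets the field simply reads back on a new client as zero pressure, i.e. no
extra pushback. A minimal sketch, reusing the generated accessors shown earlier
(imports as in the usage sketch above):

    // Stats from a server that knows nothing about compaction pressure.
    ClientProtos.RegionLoadStats legacy = ClientProtos.RegionLoadStats.newBuilder()
        .setMemstoreLoad(10)
        .setHeapOccupancy(20)
        .build();
    assert !legacy.hasCompactionPressure();      // field was never set
    assert legacy.getCompactionPressure() == 0;  // default applies, no extra backoff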

http://git-wip-us.apache.org/repos/asf/hbase/blob/bf7c0bc0/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index bf4c3b3..912dc98 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -5183,6 +5183,8 @@ public class HRegion implements HeapSize { // , Writable{
     stats.setMemstoreLoad((int) (Math.min(100, (this.memstoreSize.get() * 100) / this
         .memstoreFlushSize)));
     stats.setHeapOccupancy((int)rsServices.getHeapMemoryManager().getHeapOccupancyPercent()*100);
+    stats.setCompactionPressure((int)rsServices.getCompactionPressure()*100 > 100 ? 100 :
+                (int)rsServices.getCompactionPressure()*100);
     return stats.build();
   }
 

