hbase-commits mailing list archives

From e...@apache.org
Subject [43/49] HBASE-10357 Failover RPC's for scans (Devaraj Das)
Date Sat, 28 Jun 2014 00:31:29 GMT
http://git-wip-us.apache.org/repos/asf/hbase/blob/5a8f3f7c/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java
index bd1c216..6956b31 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java
@@ -17404,6 +17404,16 @@ public final class ClientProtos {
      */
     org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultOrBuilder getResultsOrBuilder(
         int index);
+
+    // optional bool stale = 6;
+    /**
+     * <code>optional bool stale = 6;</code>
+     */
+    boolean hasStale();
+    /**
+     * <code>optional bool stale = 6;</code>
+     */
+    boolean getStale();
   }
   /**
    * Protobuf type {@code ScanResponse}
@@ -17506,6 +17516,11 @@ public final class ClientProtos {
              results_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result.PARSER, extensionRegistry));
               break;
             }
+            case 48: {
+              bitField0_ |= 0x00000008;
+              stale_ = input.readBool();
+              break;
+            }
           }
         }
       } catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -17719,12 +17734,29 @@ public final class ClientProtos {
       return results_.get(index);
     }
 
+    // optional bool stale = 6;
+    public static final int STALE_FIELD_NUMBER = 6;
+    private boolean stale_;
+    /**
+     * <code>optional bool stale = 6;</code>
+     */
+    public boolean hasStale() {
+      return ((bitField0_ & 0x00000008) == 0x00000008);
+    }
+    /**
+     * <code>optional bool stale = 6;</code>
+     */
+    public boolean getStale() {
+      return stale_;
+    }
+
     private void initFields() {
       cellsPerResult_ = java.util.Collections.emptyList();
       scannerId_ = 0L;
       moreResults_ = false;
       ttl_ = 0;
       results_ = java.util.Collections.emptyList();
+      stale_ = false;
     }
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
@@ -17753,6 +17785,9 @@ public final class ClientProtos {
       for (int i = 0; i < results_.size(); i++) {
         output.writeMessage(5, results_.get(i));
       }
+      if (((bitField0_ & 0x00000008) == 0x00000008)) {
+        output.writeBool(6, stale_);
+      }
       getUnknownFields().writeTo(output);
     }
 
@@ -17787,6 +17822,10 @@ public final class ClientProtos {
         size += com.google.protobuf.CodedOutputStream
           .computeMessageSize(5, results_.get(i));
       }
+      if (((bitField0_ & 0x00000008) == 0x00000008)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBoolSize(6, stale_);
+      }
       size += getUnknownFields().getSerializedSize();
       memoizedSerializedSize = size;
       return size;
@@ -17829,6 +17868,11 @@ public final class ClientProtos {
       }
       result = result && getResultsList()
           .equals(other.getResultsList());
+      result = result && (hasStale() == other.hasStale());
+      if (hasStale()) {
+        result = result && (getStale()
+            == other.getStale());
+      }
       result = result &&
           getUnknownFields().equals(other.getUnknownFields());
       return result;
@@ -17862,6 +17906,10 @@ public final class ClientProtos {
         hash = (37 * hash) + RESULTS_FIELD_NUMBER;
         hash = (53 * hash) + getResultsList().hashCode();
       }
+      if (hasStale()) {
+        hash = (37 * hash) + STALE_FIELD_NUMBER;
+        hash = (53 * hash) + hashBoolean(getStale());
+      }
       hash = (29 * hash) + getUnknownFields().hashCode();
       memoizedHashCode = hash;
       return hash;
@@ -17992,6 +18040,8 @@ public final class ClientProtos {
         } else {
           resultsBuilder_.clear();
         }
+        stale_ = false;
+        bitField0_ = (bitField0_ & ~0x00000020);
         return this;
       }
 
@@ -18046,6 +18096,10 @@ public final class ClientProtos {
         } else {
           result.results_ = resultsBuilder_.build();
         }
+        if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
+          to_bitField0_ |= 0x00000008;
+        }
+        result.stale_ = stale_;
         result.bitField0_ = to_bitField0_;
         onBuilt();
         return result;
@@ -18107,6 +18161,9 @@ public final class ClientProtos {
             }
           }
         }
+        if (other.hasStale()) {
+          setStale(other.getStale());
+        }
         this.mergeUnknownFields(other.getUnknownFields());
         return this;
       }
@@ -18717,6 +18774,39 @@ public final class ClientProtos {
         return resultsBuilder_;
       }
 
+      // optional bool stale = 6;
+      private boolean stale_ ;
+      /**
+       * <code>optional bool stale = 6;</code>
+       */
+      public boolean hasStale() {
+        return ((bitField0_ & 0x00000020) == 0x00000020);
+      }
+      /**
+       * <code>optional bool stale = 6;</code>
+       */
+      public boolean getStale() {
+        return stale_;
+      }
+      /**
+       * <code>optional bool stale = 6;</code>
+       */
+      public Builder setStale(boolean value) {
+        bitField0_ |= 0x00000020;
+        stale_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional bool stale = 6;</code>
+       */
+      public Builder clearStale() {
+        bitField0_ = (bitField0_ & ~0x00000020);
+        stale_ = false;
+        onChanged();
+        return this;
+      }
+
       // @@protoc_insertion_point(builder_scope:ScanResponse)
     }
 
@@ -30568,50 +30658,50 @@ public final class ClientProtos {
      "Specifier\022\023\n\004scan\030\002 \001(\0132\005.Scan\022\022\n\nscanne",
      "r_id\030\003 \001(\004\022\026\n\016number_of_rows\030\004 \001(\r\022\025\n\rcl" +
       "ose_scanner\030\005 \001(\010\022\025\n\rnext_call_seq\030\006 \001(\004" +
-      "\"y\n\014ScanResponse\022\030\n\020cells_per_result\030\001 \003" +
-      "(\r\022\022\n\nscanner_id\030\002 \001(\004\022\024\n\014more_results\030\003" +
-      " \001(\010\022\013\n\003ttl\030\004 \001(\r\022\030\n\007results\030\005 \003(\0132\007.Res" +
-      "ult\"\263\001\n\024BulkLoadHFileRequest\022 \n\006region\030\001" +
-      " \002(\0132\020.RegionSpecifier\0225\n\013family_path\030\002 " +
-      "\003(\0132 .BulkLoadHFileRequest.FamilyPath\022\026\n" +
-      "\016assign_seq_num\030\003 \001(\010\032*\n\nFamilyPath\022\016\n\006f" +
-      "amily\030\001 \002(\014\022\014\n\004path\030\002 \002(\t\"\'\n\025BulkLoadHFi",
-      "leResponse\022\016\n\006loaded\030\001 \002(\010\"a\n\026Coprocesso" +
-      "rServiceCall\022\013\n\003row\030\001 \002(\014\022\024\n\014service_nam" +
-      "e\030\002 \002(\t\022\023\n\013method_name\030\003 \002(\t\022\017\n\007request\030" +
-      "\004 \002(\014\"9\n\030CoprocessorServiceResult\022\035\n\005val" +
-      "ue\030\001 \001(\0132\016.NameBytesPair\"d\n\031CoprocessorS" +
-      "erviceRequest\022 \n\006region\030\001 \002(\0132\020.RegionSp" +
-      "ecifier\022%\n\004call\030\002 \002(\0132\027.CoprocessorServi" +
-      "ceCall\"]\n\032CoprocessorServiceResponse\022 \n\006" +
-      "region\030\001 \002(\0132\020.RegionSpecifier\022\035\n\005value\030" +
-      "\002 \002(\0132\016.NameBytesPair\"{\n\006Action\022\r\n\005index",
-      "\030\001 \001(\r\022 \n\010mutation\030\002 \001(\0132\016.MutationProto" +
-      "\022\021\n\003get\030\003 \001(\0132\004.Get\022-\n\014service_call\030\004 \001(" +
-      "\0132\027.CoprocessorServiceCall\"Y\n\014RegionActi" +
-      "on\022 \n\006region\030\001 \002(\0132\020.RegionSpecifier\022\016\n\006" +
-      "atomic\030\002 \001(\010\022\027\n\006action\030\003 \003(\0132\007.Action\"\221\001" +
-      "\n\021ResultOrException\022\r\n\005index\030\001 \001(\r\022\027\n\006re" +
-      "sult\030\002 \001(\0132\007.Result\022!\n\texception\030\003 \001(\0132\016" +
-      ".NameBytesPair\0221\n\016service_result\030\004 \001(\0132\031" +
-      ".CoprocessorServiceResult\"f\n\022RegionActio" +
-      "nResult\022-\n\021resultOrException\030\001 \003(\0132\022.Res",
-      "ultOrException\022!\n\texception\030\002 \001(\0132\016.Name" +
-      "BytesPair\"G\n\014MultiRequest\022#\n\014regionActio" +
-      "n\030\001 \003(\0132\r.RegionAction\022\022\n\nnonceGroup\030\002 \001" +
-      "(\004\"@\n\rMultiResponse\022/\n\022regionActionResul" +
-      "t\030\001 \003(\0132\023.RegionActionResult*\'\n\013Consiste" +
-      "ncy\022\n\n\006STRONG\020\000\022\014\n\010TIMELINE\020\0012\261\002\n\rClient" +
-      "Service\022 \n\003Get\022\013.GetRequest\032\014.GetRespons" +
-      "e\022)\n\006Mutate\022\016.MutateRequest\032\017.MutateResp" +
-      "onse\022#\n\004Scan\022\014.ScanRequest\032\r.ScanRespons" +
-      "e\022>\n\rBulkLoadHFile\022\025.BulkLoadHFileReques",
-      "t\032\026.BulkLoadHFileResponse\022F\n\013ExecService" +
-      "\022\032.CoprocessorServiceRequest\032\033.Coprocess" +
-      "orServiceResponse\022&\n\005Multi\022\r.MultiReques" +
-      "t\032\016.MultiResponseBB\n*org.apache.hadoop.h" +
-      "base.protobuf.generatedB\014ClientProtosH\001\210" +
-      "\001\001\240\001\001"
+      "\"\210\001\n\014ScanResponse\022\030\n\020cells_per_result\030\001 " +
+      "\003(\r\022\022\n\nscanner_id\030\002 \001(\004\022\024\n\014more_results\030" +
+      "\003 \001(\010\022\013\n\003ttl\030\004 \001(\r\022\030\n\007results\030\005 \003(\0132\007.Re" +
+      "sult\022\r\n\005stale\030\006 \001(\010\"\263\001\n\024BulkLoadHFileReq" +
+      "uest\022 \n\006region\030\001 \002(\0132\020.RegionSpecifier\0225" +
+      "\n\013family_path\030\002 \003(\0132 .BulkLoadHFileReque" +
+      "st.FamilyPath\022\026\n\016assign_seq_num\030\003 \001(\010\032*\n" +
+      "\nFamilyPath\022\016\n\006family\030\001 \002(\014\022\014\n\004path\030\002 \002(",
+      "\t\"\'\n\025BulkLoadHFileResponse\022\016\n\006loaded\030\001 \002" +
+      "(\010\"a\n\026CoprocessorServiceCall\022\013\n\003row\030\001 \002(" +
+      "\014\022\024\n\014service_name\030\002 \002(\t\022\023\n\013method_name\030\003" +
+      " \002(\t\022\017\n\007request\030\004 \002(\014\"9\n\030CoprocessorServ" +
+      "iceResult\022\035\n\005value\030\001 \001(\0132\016.NameBytesPair" +
+      "\"d\n\031CoprocessorServiceRequest\022 \n\006region\030" +
+      "\001 \002(\0132\020.RegionSpecifier\022%\n\004call\030\002 \002(\0132\027." +
+      "CoprocessorServiceCall\"]\n\032CoprocessorSer" +
+      "viceResponse\022 \n\006region\030\001 \002(\0132\020.RegionSpe" +
+      "cifier\022\035\n\005value\030\002 \002(\0132\016.NameBytesPair\"{\n",
+      "\006Action\022\r\n\005index\030\001 \001(\r\022 \n\010mutation\030\002 \001(\013" +
+      "2\016.MutationProto\022\021\n\003get\030\003 \001(\0132\004.Get\022-\n\014s" +
+      "ervice_call\030\004 \001(\0132\027.CoprocessorServiceCa" +
+      "ll\"Y\n\014RegionAction\022 \n\006region\030\001 \002(\0132\020.Reg" +
+      "ionSpecifier\022\016\n\006atomic\030\002 \001(\010\022\027\n\006action\030\003" +
+      " \003(\0132\007.Action\"\221\001\n\021ResultOrException\022\r\n\005i" +
+      "ndex\030\001 \001(\r\022\027\n\006result\030\002 \001(\0132\007.Result\022!\n\te" +
+      "xception\030\003 \001(\0132\016.NameBytesPair\0221\n\016servic" +
+      "e_result\030\004 \001(\0132\031.CoprocessorServiceResul" +
+      "t\"f\n\022RegionActionResult\022-\n\021resultOrExcep",
+      "tion\030\001 \003(\0132\022.ResultOrException\022!\n\texcept" +
+      "ion\030\002 \001(\0132\016.NameBytesPair\"G\n\014MultiReques" +
+      "t\022#\n\014regionAction\030\001 \003(\0132\r.RegionAction\022\022" +
+      "\n\nnonceGroup\030\002 \001(\004\"@\n\rMultiResponse\022/\n\022r" +
+      "egionActionResult\030\001 \003(\0132\023.RegionActionRe" +
+      "sult*\'\n\013Consistency\022\n\n\006STRONG\020\000\022\014\n\010TIMEL" +
+      "INE\020\0012\261\002\n\rClientService\022 \n\003Get\022\013.GetRequ" +
+      "est\032\014.GetResponse\022)\n\006Mutate\022\016.MutateRequ" +
+      "est\032\017.MutateResponse\022#\n\004Scan\022\014.ScanReque" +
+      "st\032\r.ScanResponse\022>\n\rBulkLoadHFile\022\025.Bul",
+      "kLoadHFileRequest\032\026.BulkLoadHFileRespons" +
+      "e\022F\n\013ExecService\022\032.CoprocessorServiceReq" +
+      "uest\032\033.CoprocessorServiceResponse\022&\n\005Mul" +
+      "ti\022\r.MultiRequest\032\016.MultiResponseBB\n*org" +
+      ".apache.hadoop.hbase.protobuf.generatedB" +
+      "\014ClientProtosH\001\210\001\001\240\001\001"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -30713,7 +30803,7 @@ public final class ClientProtos {
           internal_static_ScanResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_ScanResponse_descriptor,
-              new java.lang.String[] { "CellsPerResult", "ScannerId", "MoreResults", "Ttl", "Results", });
+              new java.lang.String[] { "CellsPerResult", "ScannerId", "MoreResults", "Ttl", "Results", "Stale", });
           internal_static_BulkLoadHFileRequest_descriptor =
             getDescriptor().getMessageTypes().get(14);
           internal_static_BulkLoadHFileRequest_fieldAccessorTable = new

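The regenerated ClientProtos above gives ScanResponse the accessors hasStale() and getStale(), and its Builder setStale() and clearStale(). A minimal sketch of how those generated accessors behave, assuming only the protobuf 2.5 API shown in this hunk; the class name StaleFlagExample is illustrative and not part of the patch:

import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse;

public class StaleFlagExample {
  public static void main(String[] args) {
    // Builder gained setStale()/clearStale(); the message gained hasStale()/getStale().
    ScanResponse fromReplica = ScanResponse.newBuilder()
        .setScannerId(1L)
        .setStale(true)   // the server sets this when the batch comes from a non-default replica
        .build();
    System.out.println(fromReplica.hasStale() + " " + fromReplica.getStale()); // true true

    // The field is optional, so responses that omit it default to stale = false.
    ScanResponse fromPrimary = ScanResponse.newBuilder().setScannerId(1L).build();
    System.out.println(fromPrimary.hasStale() + " " + fromPrimary.getStale()); // false false
  }
}
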
http://git-wip-us.apache.org/repos/asf/hbase/blob/5a8f3f7c/hbase-protocol/src/main/protobuf/Client.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/protobuf/Client.proto b/hbase-protocol/src/main/protobuf/Client.proto
index b8ad8d0..8c71ef1 100644
--- a/hbase-protocol/src/main/protobuf/Client.proto
+++ b/hbase-protocol/src/main/protobuf/Client.proto
@@ -289,6 +289,7 @@ message ScanResponse {
   // This field is mutually exclusive with cells_per_result (since the Cells will
   // be inside the pb'd Result)
   repeated Result results = 5;
+  optional bool stale = 6;
 }
 
 /**

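The new optional stale field is how a scanning client learns that a batch of results was served by a secondary region replica rather than the primary. A minimal client-side sketch, assuming the Scan.setConsistency(Consistency.TIMELINE) and Result.isStale() client APIs exercised by the TestReplicasClient changes further below; the class and method names here are illustrative, not part of the patch:

import java.io.IOException;
import org.apache.hadoop.hbase.client.Consistency;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;

public class TimelineScanExample {
  // Scan with TIMELINE consistency and count rows that were answered by a replica.
  static int countStaleRows(HTableInterface table) throws IOException {
    Scan scan = new Scan();
    scan.setConsistency(Consistency.TIMELINE); // allow failover to secondary replicas
    int stale = 0;
    ResultScanner scanner = table.getScanner(scan);
    try {
      for (Result r : scanner) {
        if (r.isStale()) { // populated from ScanResponse.stale
          stale++;
        }
      }
    } finally {
      scanner.close();
    }
    return stale;
  }
}
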
http://git-wip-us.apache.org/repos/asf/hbase/blob/5a8f3f7c/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index 599fd1c..a601577 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -60,6 +60,7 @@ import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Increment;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.RowMutations;
 import org.apache.hadoop.hbase.client.Scan;
@@ -323,7 +324,8 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
   }
 
   private void addResults(final ScanResponse.Builder builder, final List<Result> results,
-      final RpcController controller) {
+      final RpcController controller, boolean isDefaultRegion) {
+    builder.setStale(!isDefaultRegion);
     if (results == null || results.isEmpty()) return;
     if (isClientCellBlockSupport()) {
       for (Result res : results) {
@@ -1942,6 +1944,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
             try {
               int i = 0;
               synchronized(scanner) {
+                boolean stale = (region.getRegionInfo().getReplicaId() != 0);
                 for (; i < rows
                     && currentScanResultSize < maxResultSize; ) {
                   // Collect values to be returned here
@@ -1952,7 +1955,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
                         currentScanResultSize += KeyValueUtil.ensureKeyValue(kv).heapSize();
                       }
                     }
-                    results.add(Result.create(values));
+                    results.add(Result.create(values, null, stale));
                     i++;
                   }
                   if (!moreRows) {
@@ -1979,7 +1982,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
             moreResults = false;
             results = null;
           } else {
-            addResults(builder, results, controller);
+            addResults(builder, results, controller, RegionReplicaUtil.isDefaultReplica(region.getRegionInfo()));
           }
         } finally {
           // We're done. On way out re-add the above removed lease.

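The rule added to RSRpcServices above is that scan results served by anything other than the default (primary) replica are marked stale, both on each Result and on the ScanResponse. A minimal sketch of that check, assuming RegionReplicaUtil.isDefaultReplica(HRegionInfo) and HRegionInfo.getReplicaId() as used in the hunks above; the wrapper class is illustrative:

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;

public class StaleMarkingExample {
  // Results coming from a non-default replica are flagged stale; the client can
  // then decide whether possibly lagging data is acceptable.
  static boolean isStaleSource(HRegionInfo regionInfo) {
    return !RegionReplicaUtil.isDefaultReplica(regionInfo);
    // Equivalent to the per-row check above: regionInfo.getReplicaId() != 0
  }
}
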
http://git-wip-us.apache.org/repos/asf/hbase/blob/5a8f3f7c/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMultiVersions.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMultiVersions.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMultiVersions.java
index 5f45be3..c04edc1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMultiVersions.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMultiVersions.java
@@ -241,7 +241,7 @@ public class TestMultiVersions {
         }
         assertTrue(cellCount == 1);
       }
-      table.close();
+      table.flushCommits();
     }
 
     // Case 1: scan with LATEST_TIMESTAMP. Should get two rows

http://git-wip-us.apache.org/repos/asf/hbase/blob/5a8f3f7c/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditorNoCluster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditorNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditorNoCluster.java
index 6ae0ecd..ea2324c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditorNoCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditorNoCluster.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellScannable;
 import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
@@ -41,6 +42,7 @@ import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.MediumTests;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.client.HConnectionManager;
 import org.apache.hadoop.hbase.client.HConnectionTestingUtility;
@@ -186,19 +188,20 @@ public class TestMetaReaderEditorNoCluster {
       // to shove this in here first so it gets picked up all over; e.g. by
       // HTable.
       connection = HConnectionTestingUtility.getSpiedConnection(UTIL.getConfiguration());
+      
       // Fix the location lookup so it 'works' though no network.  First
       // make an 'any location' object.
       final HRegionLocation anyLocation =
         new HRegionLocation(HRegionInfo.FIRST_META_REGIONINFO, sn);
-      // Return the any location object when locateRegion is called in HTable
-      // constructor and when its called by ServerCallable (it uses getRegionLocation).
+      final RegionLocations rl = new RegionLocations(anyLocation);
+      // Return the RegionLocations object when locateRegion
       // The ugly format below comes of 'Important gotcha on spying real objects!' from
       // http://mockito.googlecode.com/svn/branches/1.6/javadoc/org/mockito/Mockito.html
-      Mockito.doReturn(anyLocation).
-        when(connection).locateRegion((TableName) Mockito.any(), (byte[]) Mockito.any());
-      Mockito.doReturn(anyLocation).
-        when(connection).getRegionLocation((TableName) Mockito.any(),
-          (byte[]) Mockito.any(), Mockito.anyBoolean());
+      ClusterConnection cConnection =
+          HConnectionTestingUtility.getSpiedClusterConnection(UTIL.getConfiguration());
+      Mockito.doReturn(rl).when
+      (cConnection).locateRegion((TableName)Mockito.any(), (byte[])Mockito.any(),
+              Mockito.anyBoolean(), Mockito.anyBoolean(), Mockito.anyInt());
 
       // Now shove our HRI implementation into the spied-upon connection.
       Mockito.doReturn(implementation).

http://git-wip-us.apache.org/repos/asf/hbase/blob/5a8f3f7c/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java
index 0f0104a..18aad66 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java
@@ -160,6 +160,20 @@ public class HConnectionTestingUtility {
     }
   }
 
+  public static ClusterConnection getSpiedClusterConnection(final Configuration conf)
+  throws IOException {
+    HConnectionKey connectionKey = new HConnectionKey(conf);
+    synchronized (ConnectionManager.CONNECTION_INSTANCES) {
+      HConnectionImplementation connection =
+          ConnectionManager.CONNECTION_INSTANCES.get(connectionKey);
+      if (connection == null) {
+        connection = Mockito.spy(new HConnectionImplementation(conf, true));
+        ConnectionManager.CONNECTION_INSTANCES.put(connectionKey, connection);
+      }
+      return connection;
+    }
+  }
+
   /**
    * @return Count of extant connection instances
    */

http://git-wip-us.apache.org/repos/asf/hbase/blob/5a8f3f7c/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java
index 5542016..1e69afa 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java
@@ -37,11 +37,12 @@ import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.protobuf.RequestConverter;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
-import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.regionserver.InternalScanner;
+import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.apache.hadoop.hbase.regionserver.StorefileRefresherChore;
 import org.apache.hadoop.hbase.regionserver.TestRegionServerNoMaster;
-import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.zookeeper.ZKAssign;
 import org.apache.zookeeper.KeeperException;
 import org.junit.After;
@@ -53,9 +54,14 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
 import java.io.IOException;
+import java.util.HashMap;
+import java.util.Iterator;
 import java.util.List;
+import java.util.Random;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.atomic.AtomicReference;
 
@@ -84,16 +90,44 @@ public class TestReplicasClient {
    */
   public static class SlowMeCopro extends BaseRegionObserver {
     static final AtomicLong sleepTime = new AtomicLong(0);
+    static final AtomicBoolean slowDownNext = new AtomicBoolean(false);
+    static final AtomicInteger countOfNext = new AtomicInteger(0);
     static final AtomicReference<CountDownLatch> cdl =
         new AtomicReference<CountDownLatch>(new CountDownLatch(0));
-
+    Random r = new Random();
     public SlowMeCopro() {
     }
 
     @Override
     public void preGetOp(final ObserverContext<RegionCoprocessorEnvironment> e,
                         final Get get, final List<Cell> results) throws IOException {
+      slowdownCode(e);
+    }
+
+    @Override
+    public RegionScanner preScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> e,
+        final Scan scan, final RegionScanner s) throws IOException {
+      slowdownCode(e);
+      return s;
+    }
+
+    @Override
+    public boolean preScannerNext(final ObserverContext<RegionCoprocessorEnvironment> e,
+        final InternalScanner s, final List<Result> results,
+        final int limit, final boolean hasMore) throws IOException {
+      //this will slow down a certain next operation if the conditions are met. The slowness
+      //will allow the call to go to a replica
+      if (slowDownNext.get()) {
+        //have some "next" return successfully from the primary; hence countOfNext checked
+        if (countOfNext.incrementAndGet() == 2) {
+          sleepTime.set(2000);
+          slowdownCode(e);
+        }
+      }
+      return true;
+    }
 
+    private void slowdownCode(final ObserverContext<RegionCoprocessorEnvironment> e) {
       if (e.getEnvironment().getRegion().getRegionInfo().getReplicaId() == 0) {
         CountDownLatch latch = cdl.get();
         try {
@@ -121,7 +155,7 @@ public class TestReplicasClient {
     // enable store file refreshing
     HTU.getConfiguration().setInt(
         StorefileRefresherChore.REGIONSERVER_STOREFILE_REFRESH_PERIOD, REFRESH_PERIOD);
-
+    HTU.getConfiguration().setBoolean("hbase.client.log.scanner.activity", true);
     HTU.startMiniCluster(NB_SERVERS);
 
     // Create table then get the single region for our new table.
@@ -161,6 +195,14 @@ public class TestReplicasClient {
   @Before
   public void before() throws IOException {
     HTU.getHBaseAdmin().getConnection().clearRegionCache();
+    try {
+      openRegion(hriPrimary);
+    } catch (Exception ignored) {
+    }
+    try {
+      openRegion(hriSecondary);
+    } catch (Exception ignored) {
+    }
   }
 
   @After
@@ -169,6 +211,10 @@ public class TestReplicasClient {
       closeRegion(hriSecondary);
     } catch (Exception ignored) {
     }
+    try {
+      closeRegion(hriPrimary);
+    } catch (Exception ignored) {
+    }
     ZKAssign.deleteNodeFailSilent(HTU.getZooKeeperWatcher(), hriPrimary);
     ZKAssign.deleteNodeFailSilent(HTU.getZooKeeperWatcher(), hriSecondary);
 
@@ -180,6 +226,9 @@ public class TestReplicasClient {
   }
 
   private void openRegion(HRegionInfo hri) throws Exception {
+    try {
+      if (isRegionOpened(hri)) return;
+    } catch (Exception e){}
     ZKAssign.createNodeOffline(HTU.getZooKeeperWatcher(), hri, getRS().getServerName());
     // first version is '0'
     AdminProtos.OpenRegionRequest orr = RequestConverter.buildOpenRegionRequest(
@@ -215,6 +264,10 @@ public class TestReplicasClient {
         ZKAssign.deleteOpenedNode(HTU.getZooKeeperWatcher(), hri.getEncodedName(), null));
   }
 
+  private boolean isRegionOpened(HRegionInfo hri) throws Exception {
+    return getRS().getRegionByEncodedName(hri.getEncodedName()).isAvailable();
+  }
+
   private void checkRegionIsClosed(String encodedRegionName) throws Exception {
 
     while (!getRS().getRegionsInTransitionInRS().isEmpty()) {
@@ -475,4 +528,106 @@ public class TestReplicasClient {
       closeRegion(hriSecondary);
     }
   }
-}
\ No newline at end of file
+
+  @Test
+  public void testScanWithReplicas() throws Exception {
+    //simple scan
+    runMultipleScansOfOneType(false, false);
+  }
+
+  @Test
+  public void testSmallScanWithReplicas() throws Exception {
+    //small scan
+    runMultipleScansOfOneType(false, true);
+  }
+
+  @Test
+  public void testReverseScanWithReplicas() throws Exception {
+    //reverse scan
+    runMultipleScansOfOneType(true, false);
+  }
+
+  private void runMultipleScansOfOneType(boolean reversed, boolean small) throws Exception {
+    openRegion(hriSecondary);
+    int NUMROWS = 100;
+    try {
+      for (int i = 0; i < NUMROWS; i++) {
+        byte[] b1 = Bytes.toBytes("testUseRegionWithReplica" + i);
+        Put p = new Put(b1);
+        p.add(f, b1, b1);
+        table.put(p);
+      }
+      LOG.debug("PUT done");
+      int caching = 20;
+      byte[] start;
+      if (reversed) start = Bytes.toBytes("testUseRegionWithReplica" + (NUMROWS - 1));
+      else start = Bytes.toBytes("testUseRegionWithReplica" + 0);
+
+      scanWithReplicas(reversed, small, Consistency.TIMELINE, caching, start, NUMROWS, false, false);
+
+      //Even if we were to slow the server down, unless we ask for stale
+      //we won't get it
+      SlowMeCopro.sleepTime.set(5000);
+      scanWithReplicas(reversed, small, Consistency.STRONG, caching, start, NUMROWS, false, false);
+      SlowMeCopro.sleepTime.set(0);
+
+      HTU.getHBaseAdmin().flush(table.getTableName());
+      LOG.info("flush done");
+      Thread.sleep(1000 + REFRESH_PERIOD * 2);
+
+      //Now set the flag to get a response even if stale
+      SlowMeCopro.sleepTime.set(5000);
+      scanWithReplicas(reversed, small, Consistency.TIMELINE, caching, start, NUMROWS, true, false);
+      SlowMeCopro.sleepTime.set(0);
+
+      // now make some 'next' calls slow
+      SlowMeCopro.slowDownNext.set(true);
+      SlowMeCopro.countOfNext.set(0);
+      scanWithReplicas(reversed, small, Consistency.TIMELINE, caching, start, NUMROWS, true, true);
+      SlowMeCopro.slowDownNext.set(false);
+      SlowMeCopro.countOfNext.set(0);
+    } finally {
+      SlowMeCopro.cdl.get().countDown();
+      SlowMeCopro.sleepTime.set(0);
+      SlowMeCopro.slowDownNext.set(false);
+      SlowMeCopro.countOfNext.set(0);
+      for (int i = 0; i < NUMROWS; i++) {
+        byte[] b1 = Bytes.toBytes("testUseRegionWithReplica" + i);
+        Delete d = new Delete(b1);
+        table.delete(d);
+      }
+      closeRegion(hriSecondary);
+    }
+  }
+
+  private void scanWithReplicas(boolean reversed, boolean small, Consistency consistency,
+      int caching, byte[] startRow, int numRows, boolean staleExpected, boolean slowNext)
+          throws Exception {
+    Scan scan = new Scan(startRow);
+    scan.setCaching(caching);
+    scan.setReversed(reversed);
+    scan.setSmall(small);
+    scan.setConsistency(consistency);
+    ResultScanner scanner = table.getScanner(scan);
+    Iterator<Result> iter = scanner.iterator();
+    HashMap<String, Boolean> map = new HashMap<String, Boolean>();
+    int count = 0;
+    int countOfStale = 0;
+    while (iter.hasNext()) {
+      count++;
+      Result r = iter.next();
+      if (map.containsKey(new String(r.getRow()))) {
+        throw new Exception("Unexpected scan result. Repeated row " + Bytes.toString(r.getRow()));
+      }
+      map.put(new String(r.getRow()), true);
+      if (!slowNext) Assert.assertTrue(r.isStale() == staleExpected);
+      if (r.isStale()) countOfStale++;
+    }
+    LOG.debug("Count of rows " + count + " num rows expected " + numRows);
+    Assert.assertTrue(count == numRows);
+    if (slowNext) {
+      LOG.debug("Count of Stale " + countOfStale);
+      Assert.assertTrue(countOfStale > 1 && countOfStale < numRows);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/5a8f3f7c/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java
index 68923e7..5ba831b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java
@@ -189,7 +189,6 @@ public class TestRestoreSnapshotFromClient {
     assertEquals(500, TEST_UTIL.countRows(table, TEST_FAMILY2));
     Set<String> fsFamilies = getFamiliesFromFS(tableName);
     assertEquals(2, fsFamilies.size());
-    table.close();
 
     // Take a snapshot
     admin.disableTable(tableName);
@@ -210,7 +209,6 @@ public class TestRestoreSnapshotFromClient {
     assertEquals(1, htd.getFamilies().size());
     fsFamilies = getFamiliesFromFS(tableName);
     assertEquals(1, fsFamilies.size());
-    table.close();
 
     // Restore back the snapshot (with the cf)
     admin.disableTable(tableName);

http://git-wip-us.apache.org/repos/asf/hbase/blob/5a8f3f7c/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabels.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabels.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabels.java
index 15eff02..86f5c98 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabels.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabels.java
@@ -879,7 +879,7 @@ public class TestVisibilityLabels {
       table.put(puts);
     } finally {
       if (table != null) {
-        table.close();
+        table.flushCommits();
       }
     }
     return table;

