geode-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From bschucha...@apache.org
Subject [geode] branch develop updated: GEODE-5649 getAll() does not trigger client metadata refresh when primary bucket not known
Date Tue, 04 Sep 2018 15:54:48 GMT
This is an automated email from the ASF dual-hosted git repository.

bschuchardt pushed a commit to branch develop
in repository https://gitbox.apache.org/repos/asf/geode.git


The following commit(s) were added to refs/heads/develop by this push:
     new 72d393e  GEODE-5649 getAll() does not trigger client metadata refresh when primary bucket not known
72d393e is described below

commit 72d393e649ecf0dfa73993187843bc135a47c516
Author: Bruce Schuchardt <bschuchardt@pivotal.io>
AuthorDate: Tue Sep 4 08:52:46 2018 -0700

    GEODE-5649 getAll() does not trigger client metadata refresh when primary bucket not known
    
    If the primary for a bucket was not known when creating one-hop tasks we
    were not scheduling a metadata refresh.
    
    These changes initiate a refresh but allow the current operation to
    continue as a non-single-hop operation.
    
    This closes #2402
---
 .../cache/PartitionedRegionSingleHopDUnitTest.java |   6 +-
 .../execute/SingleHopGetAllPutAllDUnitTest.java    | 298 +++++++++++++--------
 .../client/internal/ClientMetadataService.java     |  32 ++-
 .../client/internal/SingleHopClientExecutor.java   |   1 -
 .../geode/internal/cache/CachePerfStats.java       |   7 +-
 5 files changed, 223 insertions(+), 121 deletions(-)

diff --git a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionSingleHopDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionSingleHopDUnitTest.java
index 42522f5..7751be2 100755
--- a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionSingleHopDUnitTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionSingleHopDUnitTest.java
@@ -396,7 +396,8 @@ public class PartitionedRegionSingleHopDUnitTest extends JUnit4CacheTestCase {
         .until(() -> cms.isRefreshMetadataTestOnly() == true);
 
     // make sure all fetch tasks are completed
-    Awaitility.waitAtMost(60, TimeUnit.SECONDS).until(() -> cms.getRefreshTaskCount() == 0);
+    Awaitility.waitAtMost(60, TimeUnit.SECONDS)
+        .until(() -> cms.getRefreshTaskCount_TEST_ONLY() == 0);
 
     cms.satisfyRefreshMetadata_TEST_ONLY(false);
     region.put(new Integer(0), "create0");
@@ -1940,7 +1941,8 @@ public class PartitionedRegionSingleHopDUnitTest extends JUnit4CacheTestCase {
   private void verifyMetadata() {
     ClientMetadataService cms = ((GemFireCacheImpl) cache).getClientMetadataService();
     // make sure all fetch tasks are completed
-    Awaitility.waitAtMost(60, TimeUnit.SECONDS).until(() -> cms.getRefreshTaskCount() == 0);
+    Awaitility.waitAtMost(60, TimeUnit.SECONDS)
+        .until(() -> cms.getRefreshTaskCount_TEST_ONLY() == 0);
 
     // final Map<String, ClientPartitionAdvisor> regionMetaData = cms
     // .getClientPRMetadata_TEST_ONLY();
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/execute/SingleHopGetAllPutAllDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/execute/SingleHopGetAllPutAllDUnitTest.java
index df7ee34..238ed1a 100644
--- a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/execute/SingleHopGetAllPutAllDUnitTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/execute/SingleHopGetAllPutAllDUnitTest.java
@@ -14,31 +14,35 @@
  */
 package org.apache.geode.internal.cache.execute;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import static org.assertj.core.api.Assertions.assertThat;
 
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
-import java.util.Map.Entry;
+import java.util.concurrent.TimeUnit;
 
-import org.junit.Ignore;
+import org.awaitility.Awaitility;
+import org.junit.Before;
 import org.junit.Test;
 
 import org.apache.geode.cache.Region;
 import org.apache.geode.cache.client.internal.ClientMetadataService;
 import org.apache.geode.cache.client.internal.ClientPartitionAdvisor;
 import org.apache.geode.distributed.DistributedSystem;
+import org.apache.geode.internal.cache.BucketServerLocation66;
 import org.apache.geode.internal.cache.GemFireCacheImpl;
 import org.apache.geode.internal.cache.LocalRegion;
-import org.apache.geode.test.dunit.Assert;
-import org.apache.geode.test.dunit.Wait;
-import org.apache.geode.test.dunit.WaitCriterion;
 
 
+/**
+ * This class tests single-hop bulk operations in client caches. Single-hop makes use
+ * of metadata concerning partitioned region bucket locations to find primary buckets
+ * on which to operate. If the metadata is incorrect it forces scheduling of a refresh.
+ * A total count of all refresh requests is kept in the metadata service and is used
+ * by this test to verify whether the cache thought the metadata was correct or not.
+ */
 public class SingleHopGetAllPutAllDUnitTest extends PRClientServerTestBase {
 
 
@@ -49,33 +53,28 @@ public class SingleHopGetAllPutAllDUnitTest extends PRClientServerTestBase {
 
   }
 
-  /*
-   * Do a getAll from client and see if all the values are returned. Will also have to see if the
-   * function was routed from client to all the servers hosting the data.
-   */
-  @Ignore("Disabled due to bug #50618")
-  @Test
-  public void testServerGetAllFunction() {
-    createScenario();
-    client.invoke(() -> SingleHopGetAllPutAllDUnitTest.getAll());
-  }
-
-  private void createScenario() {
+  @Before
+  public void createScenario() {
     ArrayList commonAttributes =
-        createCommonServerAttributes("TestPartitionedRegion", null, 1, 13, null);
+        createCommonServerAttributes("TestPartitionedRegion", null, 2, 13, null);
     createClientServerScenarioSingleHop(commonAttributes, 20, 20, 20);
   }
 
-  public static void getAll() {
-    Region region = cache.getRegion(PartitionedRegionName);
-    assertNotNull(region);
-    final List testValueList = new ArrayList();
-    final List testKeyList = new ArrayList();
-    for (int i = (totalNumBuckets.intValue() * 3); i > 0; i--) {
-      testValueList.add("execKey-" + i);
-    }
-    DistributedSystem.setThreadsSocketPolicy(false);
-    try {
+  /**
+   * populate the region, do a getAll, verify that metadata is fetched
+   * do another getAll and verify that metadata did not need to be refetched
+   */
+  @Test
+  public void testGetAllInClient() {
+    client.invoke("testGetAllInClient", () -> {
+      Region region = cache.getRegion(PartitionedRegionName);
+      assertThat(region).isNotNull();
+      final List testValueList = new ArrayList();
+      final List testKeyList = new ArrayList();
+      for (int i = (totalNumBuckets.intValue() * 3); i > 0; i--) {
+        testValueList.add("execKey-" + i);
+      }
+      DistributedSystem.setThreadsSocketPolicy(false);
       int j = 0;
       Map origVals = new HashMap();
       for (Iterator i = testValueList.iterator(); i.hasNext();) {
@@ -89,108 +88,191 @@ public class SingleHopGetAllPutAllDUnitTest extends PRClientServerTestBase {
       // check if the client meta-data is in synch
       verifyMetadata();
 
-      // check if the function was routed to pruned nodes
+      long metadataRefreshes =
+          ((GemFireCacheImpl) cache).getClientMetadataService()
+              .getTotalRefreshTaskCount_TEST_ONLY();
+
       Map resultMap = region.getAll(testKeyList);
-      assertTrue(resultMap.equals(origVals));
-      Wait.pause(2000);
-      Map secondResultMap = region.getAll(testKeyList);
-      assertTrue(secondResultMap.equals(origVals));
-    } catch (Exception e) {
-      Assert.fail("Test failed after the getAll operation", e);
-    }
+      assertThat(resultMap).isEqualTo(origVals);
+
+      // a new refresh should not have been triggered
+      assertThat(((GemFireCacheImpl) cache).getClientMetadataService()
+          .getTotalRefreshTaskCount_TEST_ONLY())
+              .isEqualTo(metadataRefreshes);
+    });
   }
 
-  private static void verifyMetadata() {
-    Region region = cache.getRegion(PartitionedRegionName);
-    ClientMetadataService cms = ((GemFireCacheImpl) cache).getClientMetadataService();
-    cms.getClientPRMetadata((LocalRegion) region);
 
-    final Map<String, ClientPartitionAdvisor> regionMetaData = cms.getClientPRMetadata_TEST_ONLY();
-
-    WaitCriterion wc = new WaitCriterion() {
+  /**
+   * perform a putAll and ensure that metadata is fetched. Then do another
+   * putAll and ensure that metadata did not need to be refreshed
+   */
+  @Test
+  public void testPutAllInClient() {
+    client.invoke("testPutAllInClient", () -> {
+      Region<String, String> region = cache.getRegion(PartitionedRegionName);
+      assertThat(region).isNotNull();
 
-      public boolean done() {
-        return (regionMetaData.size() == 1);
+      Map<String, String> keysValuesMap = new HashMap<String, String>();
+      List<String> testKeysList = new ArrayList<>();
+      for (int i = (totalNumBuckets.intValue() * 3); i > 0; i--) {
+        testKeysList.add("putAllKey-" + i);
+        keysValuesMap.put("putAllKey-" + i, "values-" + i);
       }
+      DistributedSystem.setThreadsSocketPolicy(false);
 
-      public String description() {
-        return "Region metadat size is not 1. Exisitng size of regionMetaData is "
-            + regionMetaData.size();
-      }
-    };
-    Wait.waitForCriterion(wc, 5000, 200, true);
-    assertTrue(regionMetaData.containsKey(region.getFullPath()));
-    final ClientPartitionAdvisor prMetaData = regionMetaData.get(region.getFullPath());
-    wc = new WaitCriterion() {
+      region.putAll(keysValuesMap);
 
-      public boolean done() {
-        return (prMetaData.getBucketServerLocationsMap_TEST_ONLY().size() == 13);
-      }
+      verifyMetadata();
 
-      public String description() {
-        return "Bucket server location map size is not 13. Exisitng size is :"
-            + prMetaData.getBucketServerLocationsMap_TEST_ONLY().size();
-      }
-    };
-    Wait.waitForCriterion(wc, 5000, 200, true);
-    for (Entry entry : prMetaData.getBucketServerLocationsMap_TEST_ONLY().entrySet()) {
-      assertEquals(2, ((List) entry.getValue()).size());
-    }
+      long metadataRefreshes =
+          ((GemFireCacheImpl) cache).getClientMetadataService()
+              .getTotalRefreshTaskCount_TEST_ONLY();
+
+      region.putAll(keysValuesMap);
+
+      // a new refresh should not have been triggered
+      assertThat(((GemFireCacheImpl) cache).getClientMetadataService()
+          .getTotalRefreshTaskCount_TEST_ONLY())
+              .isEqualTo(metadataRefreshes);
+    });
   }
 
-  /*
-   * Do a getAll from client and see if all the values are returned. Will also have to see
if the
-   * function was routed from client to all the servers hosting the data.
+  /**
+   * Do a putAll and ensure that metadata has been fetched. Then do a removeAll and
+   * ensure that metadata did not need to be refreshed. Finally do a getAll to ensure
+   * that the removeAll did its job.
    */
   @Test
-  public void testServerPutAllFunction() {
-    createScenario();
-    client.invoke(() -> SingleHopGetAllPutAllDUnitTest.putAll());
-  }
+  public void testRemoveAllInClient() {
+    client.invoke("testRemoveAllInClient", () -> {
+      Region<String, String> region = cache.getRegion(PartitionedRegionName);
+      assertThat(region).isNotNull();
 
-  public static void putAll() {
-    Region<String, String> region = cache.getRegion(PartitionedRegionName);
-    assertNotNull(region);
-    final Map<String, String> keysValuesMap = new HashMap<String, String>();
-    final List<String> testKeysList = new ArrayList<String>();
-    for (int i = (totalNumBuckets.intValue() * 3); i > 0; i--) {
-      testKeysList.add("execKey-" + i);
-      keysValuesMap.put("execKey-" + i, "values-" + i);
-    }
-    DistributedSystem.setThreadsSocketPolicy(false);
-    try {
-      // check if the client meta-data is in synch
+      Map<String, String> keysValuesMap = new HashMap<String, String>();
+      List<String> testKeysList = new ArrayList<String>();
+      for (int i = (totalNumBuckets.intValue() * 3); i > 0; i--) {
+        testKeysList.add("putAllKey-" + i);
+        keysValuesMap.put("putAllKey-" + i, "values-" + i);
+      }
+
+      DistributedSystem.setThreadsSocketPolicy(false);
 
-      // check if the function was routed to pruned nodes
-      region.putAll(keysValuesMap);
-      // check the listener
-      // check how the function was executed
-      Wait.pause(2000);
       region.putAll(keysValuesMap);
 
-      // check if the client meta-data is in synch
       verifyMetadata();
 
-      // check if the function was routed to pruned nodes
-      Map<String, String> resultMap = region.getAll(testKeysList);
-      assertTrue(resultMap.equals(keysValuesMap));
-      Wait.pause(2000);
-      Map<String, String> secondResultMap = region.getAll(testKeysList);
-      assertTrue(secondResultMap.equals(keysValuesMap));
+      long metadataRefreshes =
+          ((GemFireCacheImpl) cache).getClientMetadataService()
+              .getTotalRefreshTaskCount_TEST_ONLY();
 
-      // Now test removeAll
       region.removeAll(testKeysList);
+
+      // a new refresh should not have been triggered
+      assertThat(((GemFireCacheImpl) cache).getClientMetadataService()
+          .getTotalRefreshTaskCount_TEST_ONLY())
+              .isEqualTo(metadataRefreshes);
+
       HashMap<String, Object> noValueMap = new HashMap<String, Object>();
       for (String key : testKeysList) {
         noValueMap.put(key, null);
       }
-      assertEquals(noValueMap, region.getAll(testKeysList));
-      Wait.pause(2000); // Why does this test keep pausing for 2 seconds and then do the exact same
-                        // thing?
-      region.removeAll(testKeysList);
-      assertEquals(noValueMap, region.getAll(testKeysList));
-    } catch (Exception e) {
-      Assert.fail("Test failed after the putAll operation", e);
+
+      assertThat(noValueMap).isEqualTo(region.getAll(testKeysList));
+
+      assertThat(((GemFireCacheImpl) cache).getClientMetadataService()
+          .getTotalRefreshTaskCount_TEST_ONLY())
+              .isEqualTo(metadataRefreshes);
+    });
+  }
+
+  /**
+   * If a client doesn't know the primary location of a bucket it should perform a
+   * metadata refresh. This test purposefully removes all primary location knowledge
+   * from PR metadata in a client cache and then performs a bulk operation. This
+   * should trigger a refresh.
+   */
+  @Test
+  public void testBulkOpInClientWithBadMetadataCausesRefresh() {
+    client.invoke("testBulkOpInClientWithBadMetadataCausesRefresh", () -> {
+      Region region = cache.getRegion(PartitionedRegionName);
+      assertThat(region).isNotNull();
+      final List testValueList = new ArrayList();
+      final List testKeyList = new ArrayList();
+      for (int i = (totalNumBuckets.intValue() * 3); i > 0; i--) {
+        testValueList.add("execKey-" + i);
+      }
+      DistributedSystem.setThreadsSocketPolicy(false);
+      int j = 0;
+      Map origVals = new HashMap();
+      for (Iterator i = testValueList.iterator(); i.hasNext();) {
+        testKeyList.add(j);
+        Integer key = new Integer(j++);
+        Object val = i.next();
+        origVals.put(key, val);
+        region.put(key, val);
+      }
+
+      // check if the client meta-data is in synch
+      verifyMetadata();
+
+      long metadataRefreshes =
+          ((GemFireCacheImpl) cache).getClientMetadataService()
+              .getTotalRefreshTaskCount_TEST_ONLY();
+
+      removePrimaryMetadata();
+
+      Map resultMap = region.getAll(testKeyList);
+      assertThat(resultMap).isEqualTo(origVals);
+
+      // a new refresh should have been triggered
+      assertThat(((GemFireCacheImpl) cache).getClientMetadataService()
+          .getTotalRefreshTaskCount_TEST_ONLY())
+              .isNotEqualTo(metadataRefreshes);
+    });
+  }
+
+
+  private void verifyMetadata() {
+    Region region = cache.getRegion(PartitionedRegionName);
+    ClientMetadataService cms = ((GemFireCacheImpl) cache).getClientMetadataService();
+    cms.getClientPRMetadata((LocalRegion) region);
+
+    final Map<String, ClientPartitionAdvisor> regionMetaData = cms.getClientPRMetadata_TEST_ONLY();
+
+    Awaitility.await().atMost(5, TimeUnit.MINUTES).until(() -> regionMetaData.size() > 0);
+    assertThat(regionMetaData).containsKey(region.getFullPath());
+    Awaitility.await().atMost(5, TimeUnit.MINUTES).until(() -> {
+      ClientPartitionAdvisor prMetaData = regionMetaData.get(region.getFullPath());
+      assertThat(prMetaData).isNotNull();
+      assertThat(prMetaData.adviseRandomServerLocation()).isNotNull();
+      return true;
+    });
+  }
+
+  private void removePrimaryMetadata() {
+    Region region = cache.getRegion(PartitionedRegionName);
+    ClientMetadataService cms = ((GemFireCacheImpl) cache).getClientMetadataService();
+    cms.getClientPRMetadata((LocalRegion) region);
+
+    final Map<String, ClientPartitionAdvisor> regionMetaData = cms.getClientPRMetadata_TEST_ONLY();
+
+    final ClientPartitionAdvisor prMetaData = regionMetaData.get(region.getFullPath());
+    Map<Integer, List<BucketServerLocation66>> bucketLocations =
+        prMetaData.getBucketServerLocationsMap_TEST_ONLY();
+    for (Map.Entry<Integer, List<BucketServerLocation66>> locationEntry : bucketLocations
+        .entrySet()) {
+      List<BucketServerLocation66> newList = new ArrayList<>(locationEntry.getValue());
+      for (Iterator<BucketServerLocation66> bucketIterator = newList.iterator(); bucketIterator
+          .hasNext();) {
+        BucketServerLocation66 location = bucketIterator.next();
+        if (location.isPrimary()) {
+          bucketIterator.remove();
+        }
+        bucketLocations.put(locationEntry.getKey(), newList);
+      }
+
     }
   }
+
 }
diff --git a/geode-core/src/main/java/org/apache/geode/cache/client/internal/ClientMetadataService.java b/geode-core/src/main/java/org/apache/geode/cache/client/internal/ClientMetadataService.java
index 8c51a49..db24aa0 100755
--- a/geode-core/src/main/java/org/apache/geode/cache/client/internal/ClientMetadataService.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/client/internal/ClientMetadataService.java
@@ -41,6 +41,7 @@ import org.apache.geode.distributed.internal.ServerLocation;
 import org.apache.geode.internal.cache.BucketServerLocation66;
 import org.apache.geode.internal.cache.EntryOperationImpl;
 import org.apache.geode.internal.cache.InternalRegion;
+import org.apache.geode.internal.cache.LocalRegion;
 import org.apache.geode.internal.cache.PartitionedRegion;
 import org.apache.geode.internal.cache.PartitionedRegionHelper;
 import org.apache.geode.internal.i18n.LocalizedStrings;
@@ -77,8 +78,12 @@ public class ClientMetadataService {
 
   private boolean isMetadataRefreshed_TEST_ONLY = false;
 
+  /** for testing - the current number of metadata refresh tasks running or queued to run */
   private int refreshTaskCount = 0;
 
+  /** for testing - the total number of scheduled metadata refreshes */
+  private long totalRefreshTaskCount = 0;
+
   private Set<String> regionsBeingRefreshed = new HashSet<>();
 
   private final Object fetchTaskCountLock = new Object();
@@ -211,9 +216,14 @@ public class ClientMetadataService {
 
     HashMap<ServerLocation, HashSet> serverToKeysMap = new HashMap<ServerLocation, HashSet>();
     HashMap<ServerLocation, HashSet<Integer>> serverToBuckets =
-        groupByServerToBuckets(prAdvisor, bucketToKeysMap.keySet(), primaryMembersNeeded);
+        groupByServerToBuckets(prAdvisor, bucketToKeysMap.keySet(), primaryMembersNeeded, region);
 
     if (serverToBuckets == null) {
+      if (logger.isDebugEnabled()) {
+        logger.debug("One or more primary bucket locations are unknown "
+            + "- scheduling metadata refresh for region {}", region.getFullPath());
+      }
+      scheduleGetPRMetaData((LocalRegion) region, false);
       return null;
     }
 
@@ -251,7 +261,7 @@ public class ClientMetadataService {
     for (int i = 0; i < totalNumberOfBuckets; i++) {
       allBucketIds.add(i);
     }
-    return groupByServerToBuckets(prAdvisor, allBucketIds, primaryOnly);
+    return groupByServerToBuckets(prAdvisor, allBucketIds, primaryOnly, region);
   }
 
   /**
@@ -259,7 +269,8 @@ public class ClientMetadataService {
   * are not available due to mismatch in metadata it should fill up a random server for it.
    */
   private HashMap<ServerLocation, HashSet<Integer>> groupByServerToBuckets(
-      ClientPartitionAdvisor prAdvisor, Set<Integer> bucketSet, boolean primaryOnly) {
+      ClientPartitionAdvisor prAdvisor, Set<Integer> bucketSet, boolean primaryOnly,
+      Region region) {
     if (primaryOnly) {
       HashMap<ServerLocation, HashSet<Integer>> serverToBucketsMap =
           new HashMap<ServerLocation, HashSet<Integer>>();
@@ -269,6 +280,9 @@ public class ClientMetadataService {
           // If we don't have the metadata for some buckets, return
           // null, indicating that we don't have any metadata. This
           // will cause us to use the non-single hop path.
+          logger.info("Primary for bucket {} is not known for Region {}.  "
+              + "Known server locations: {}", bucketId, region.getFullPath(),
+              prAdvisor.adviseServerLocations(bucketId));
           return null;
         }
         HashSet<Integer> buckets = serverToBucketsMap.get(server);
@@ -505,6 +519,7 @@ public class ClientMetadataService {
     } else {
       synchronized (fetchTaskCountLock) {
         refreshTaskCount++;
+        totalRefreshTaskCount++;
       }
       Runnable fetchTask = new Runnable() {
         @SuppressWarnings("synthetic-access")
@@ -615,6 +630,7 @@ public class ClientMetadataService {
         }
         regionsBeingRefreshed.add(region.getFullPath());
         refreshTaskCount++;
+        totalRefreshTaskCount++;
       }
       Runnable fetchTask = new Runnable() {
         @SuppressWarnings("synthetic-access")
@@ -828,9 +844,17 @@ public class ClientMetadataService {
     this.isMetadataStable = isMetadataStable;
   }
 
-  public int getRefreshTaskCount() {
+  /** For Testing */
+  public int getRefreshTaskCount_TEST_ONLY() {
     synchronized (fetchTaskCountLock) {
       return refreshTaskCount;
     }
   }
+
+  /** for testing */
+  public long getTotalRefreshTaskCount_TEST_ONLY() {
+    synchronized (fetchTaskCountLock) {
+      return totalRefreshTaskCount;
+    }
+  }
 }
diff --git a/geode-core/src/main/java/org/apache/geode/cache/client/internal/SingleHopClientExecutor.java b/geode-core/src/main/java/org/apache/geode/cache/client/internal/SingleHopClientExecutor.java
index a6e7c68..a206a1b 100644
--- a/geode-core/src/main/java/org/apache/geode/cache/client/internal/SingleHopClientExecutor.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/client/internal/SingleHopClientExecutor.java
@@ -235,7 +235,6 @@ public class SingleHopClientExecutor {
         Iterator futureItr = futures.iterator();
         Iterator taskItr = callableTasks.iterator();
         RuntimeException rte = null;
-        final boolean isDebugEnabled = logger.isDebugEnabled();
         while (futureItr.hasNext() && !execService.isShutdown() && !execService.isTerminated()) {
           Future fut = (Future) futureItr.next();
           SingleHopOperationCallable task = (SingleHopOperationCallable) taskItr.next();
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/CachePerfStats.java b/geode-core/src/main/java/org/apache/geode/internal/cache/CachePerfStats.java
index ed36543..c78fc38 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/CachePerfStats.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/CachePerfStats.java
@@ -282,8 +282,6 @@ public class CachePerfStats {
     final String reliableRegionsMissingNoAccessDesc =
         "Current number of regions configured for reliablity that are missing required roles with No access";
     final String clearsDesc = "The total number of times a clear has been done on this cache.";
-    final String nonSingleHopsDesc =
-        "Total number of times client request observed more than one hop during operation.";
     final String metaDataRefreshCountDesc =
         "Total number of times the meta data is refreshed due to hopping observed.";
     final String conflatedEventsDesc =
@@ -472,11 +470,8 @@ public class CachePerfStats {
                 "Number of jobs waiting to be picked up by evictor threads", "jobs"),
             f.createLongCounter("evictWorkTime",
                 "Total time spent doing eviction work in background threads", "nanoseconds",
false),
-            f.createLongCounter("nonSingleHopsCount", nonSingleHopsDesc,
-                "Total number of times client request observed more than one hop during operation.",
-                false),
             f.createLongCounter("metaDataRefreshCount", metaDataRefreshCountDesc,
-                "Total number of times the meta data is refreshed due to hopping.", false),
+                "refreshes", false),
             f.createIntCounter("deltaUpdates", deltaUpdatesDesc, "operations"),
             f.createLongCounter("deltaUpdatesTime", deltaUpdatesTimeDesc, "nanoseconds", false),
             f.createIntCounter("deltaFailedUpdates", deltaFailedUpdatesDesc, "operations"),


Mime
View raw message