geode-commits mailing list archives

From: bschucha...@apache.org
Subject: [geode] 01/01: removing unnecessary double spaces
Date: Fri, 02 Nov 2018 18:22:14 GMT
This is an automated email from the ASF dual-hosted git repository.

bschuchardt pushed a commit to branch feature/GEODE-5686c
in repository https://gitbox.apache.org/repos/asf/geode.git

commit 6f606c83580f6eede90f5b2318122c368108656d
Author: Bruce Schuchardt <bschuchardt@pivotal.io>
AuthorDate: Thu Nov 1 13:47:29 2018 -0700

    removing unnecessary double spaces
---
 .../geode/cache/ConnectionPoolDUnitTest.java       |  20 +--
 .../internal/AutoConnectionSourceDUnitTest.java    |  10 +-
 .../internal/LocatorLoadBalancingDUnitTest.java    |  10 +-
 .../cache/query/dunit/CompiledInDUnitTest.java     |  44 +++---
 .../geode/cache/query/dunit/PDXQueryTestBase.java  |  10 +-
 .../cache/query/dunit/PdxLocalQueryDUnitTest.java  |   2 +-
 .../geode/cache/query/dunit/PdxQueryDUnitTest.java | 164 ++++++++++-----------
 .../cache/query/dunit/PdxStringQueryDUnitTest.java |  28 ++--
 .../cache/query/dunit/QueryUsingPoolDUnitTest.java |  44 +++---
 .../cache/query/dunit/RemoteQueryDUnitTest.java    |  42 +++---
 .../index/PutAllWithIndexPerfDUnitTest.java        |   6 +-
 .../apache/geode/cache30/CacheXml66DUnitTest.java  |   2 +-
 .../geode/cache30/ClientMembershipDUnitTest.java   |  12 +-
 .../cache30/ClientRegisterInterestDUnitTest.java   |  40 ++---
 .../geode/internal/cache/GridAdvisorDUnitTest.java |  38 ++---
 ...istributedRegionFunctionExecutionDUnitTest.java |   4 +-
 .../cache/execute/PRClientServerTestBase.java      |   4 +-
 .../cache/ha/HARQueueNewImplDUnitTest.java         |  14 +-
 .../CacheServerTransactionsSelectorDUnitTest.java  |   2 +-
 .../sockets/ForceInvalidateEvictionDUnitTest.java  |   2 +-
 .../InterestListEndpointSelectorDUnitTest.java     |   2 +-
 .../tier/sockets/RedundancyLevelTestBase.java      |   2 +-
 .../tier/sockets/ReliableMessagingDUnitTest.java   |   2 +-
 ...niversalMembershipListenerAdapterDUnitTest.java |   8 +-
 ...tMessagesRegionCreationAndDestroyJUnitTest.java |  12 +-
 .../geode/internal/cache/DiskRegionJUnitTest.java  |   4 +-
 .../src/main/java/org/apache/geode/CopyHelper.java |   4 +-
 .../main/java/org/apache/geode/DataSerializer.java |   2 +-
 .../main/java/org/apache/geode/Instantiator.java   |   4 +-
 .../main/java/org/apache/geode/SystemFailure.java  |   2 +-
 .../java/org/apache/geode/admin/AlertLevel.java    |   2 +-
 .../geode/admin/SystemMemberBridgeServer.java      |  72 ++++-----
 .../admin/internal/AdminDistributedSystemImpl.java |  16 +-
 .../admin/internal/ConfigurationParameterImpl.java |   8 +-
 .../internal/DistributedSystemConfigImpl.java      |  10 +-
 .../internal/DistributionLocatorConfigImpl.java    |   2 +-
 .../admin/internal/ManagedEntityConfigXml.java     |   2 +-
 .../internal/SystemMemberBridgeServerImpl.java     |  12 +-
 .../admin/internal/SystemMemberCacheImpl.java      |   4 +-
 .../geode/admin/jmx/internal/AgentConfigImpl.java  |   4 +-
 .../apache/geode/admin/jmx/internal/AgentImpl.java |  12 +-
 .../geode/admin/jmx/internal/AgentLauncher.java    |  19 ++-
 .../admin/jmx/internal/GenerateMBeanHTML.java      |   2 +-
 .../apache/geode/admin/jmx/internal/MBeanUtil.java |   2 +-
 .../geode/admin/jmx/internal/MX4JModelMBean.java   |  18 +--
 .../jmx/internal/SystemMemberCacheJmxImpl.java     |   2 +-
 .../java/org/apache/geode/cache/DataPolicy.java    |   2 +-
 .../geode/cache/DiskWriteAttributesFactory.java    |   6 +-
 .../java/org/apache/geode/cache/LossAction.java    |   4 +-
 .../java/org/apache/geode/cache/RegionFactory.java |   2 +-
 .../org/apache/geode/cache/ResumptionAction.java   |   4 +-
 .../query/internal/AbstractCompiledValue.java      |   2 +-
 .../cache/query/internal/AttributeDescriptor.java  |   2 +-
 .../geode/cache/query/internal/CompiledIn.java     |   2 +-
 .../query/internal/CompiledIndexOperation.java     |   2 +-
 .../cache/query/internal/CompiledIteratorDef.java  |   2 +-
 .../cache/query/internal/CompiledNegation.java     |   2 +-
 .../geode/cache/query/internal/CompiledRegion.java |   2 +-
 .../geode/cache/query/internal/CompiledSelect.java |   2 +-
 .../cache/query/internal/CompiledUnaryMinus.java   |   2 +-
 .../geode/cache/query/internal/DefaultQuery.java   |   2 +-
 .../cache/query/internal/DefaultQueryService.java  |   8 +-
 .../cache/query/internal/ExecutionContext.java     |   2 +-
 .../geode/cache/query/internal/Functions.java      |   4 +-
 .../geode/cache/query/internal/MethodDispatch.java |   4 +-
 .../geode/cache/query/internal/QCompiler.java      |  12 +-
 .../geode/cache/query/internal/StructBag.java      |   2 +-
 .../geode/cache/query/internal/StructSet.java      |   2 +-
 .../cache/query/internal/index/IndexManager.java   |   2 +-
 .../cache/query/internal/index/RangeIndex.java     |  10 +-
 .../cache/query/internal/parse/ASTLiteral.java     |  12 +-
 .../cache/query/internal/parse/ASTUnsupported.java |   2 +-
 .../cache/query/internal/types/StructTypeImpl.java |   2 +-
 .../cache/query/internal/types/TypeUtils.java      |   4 +-
 .../server/internal/ConnectionCountProbe.java      |   6 +-
 .../geode/cache/server/internal/LoadMonitor.java   |   6 +-
 .../cache/server/internal/ServerMetricsImpl.java   |   2 +-
 .../internal/AbstractDistributionConfig.java       |   8 +-
 .../internal/ClusterDistributionManager.java       |   6 +-
 .../internal/DistributionConfigImpl.java           |   2 +-
 .../distributed/internal/DistributionMessage.java  |   6 +-
 .../internal/InternalDistributedSystem.java        |   6 +-
 .../geode/distributed/internal/ServerLocation.java |   2 +-
 .../distributed/internal/locks/DLockGrantor.java   |   4 +-
 .../distributed/internal/locks/DLockService.java   |   8 +-
 .../membership/gms/messenger/JGroupsMessenger.java |   2 +-
 .../membership/gms/mgr/GMSMembershipManager.java   |   6 +-
 .../org/apache/geode/internal/AbstractConfig.java  |  10 +-
 .../org/apache/geode/internal/AvailablePort.java   |   4 +-
 .../geode/internal/HeapDataOutputStream.java       |   2 +-
 .../geode/internal/InternalDataSerializer.java     |  24 +--
 .../geode/internal/InternalInstantiator.java       |  16 +-
 .../org/apache/geode/internal/ManagerInfo.java     |   2 +-
 .../java/org/apache/geode/internal/ObjIdMap.java   |   4 +-
 .../org/apache/geode/internal/SystemAdmin.java     |  14 +-
 .../apache/geode/internal/UniqueIdGenerator.java   |   4 +-
 .../org/apache/geode/internal/admin/CacheInfo.java |   2 +-
 .../org/apache/geode/internal/admin/GemFireVM.java |  10 +-
 .../apache/geode/internal/admin/ListenerIdMap.java |   2 +-
 .../internal/admin/remote/BridgeServerRequest.java |  32 ++--
 .../admin/remote/BridgeServerResponse.java         |   6 +-
 .../admin/remote/DistributionLocatorId.java        |   2 +-
 .../internal/admin/remote/RegionAdminRequest.java  |   2 +-
 .../internal/admin/remote/RegionResponse.java      |   2 +-
 .../geode/internal/admin/remote/RemoteAlert.java   |   2 +-
 .../internal/admin/remote/RemoteBridgeServer.java  |   8 +-
 .../internal/admin/remote/RemoteCacheInfo.java     |   4 +-
 .../internal/admin/remote/RemoteGemFireVM.java     |   2 +-
 .../admin/remote/RemoteGfManagerAgent.java         |   8 +-
 .../geode/internal/cache/AbstractCacheServer.java  |   8 +-
 .../geode/internal/cache/AbstractDiskRegion.java   |   6 +-
 .../geode/internal/cache/AbstractRegion.java       |   2 +-
 .../apache/geode/internal/cache/BucketAdvisor.java |  14 +-
 .../apache/geode/internal/cache/BucketRegion.java  |   8 +-
 .../internal/cache/CacheDistributionAdvisor.java   |   2 +-
 .../geode/internal/cache/CacheServerImpl.java      |  14 +-
 .../geode/internal/cache/CacheServerLauncher.java  |  26 ++--
 .../apache/geode/internal/cache/Conflatable.java   |   2 +-
 .../geode/internal/cache/ControllerAdvisor.java    |  10 +-
 .../internal/cache/CreateRegionProcessor.java      |   2 +-
 .../apache/geode/internal/cache/DiskInitFile.java  |   4 +-
 .../geode/internal/cache/DiskStoreFactoryImpl.java |   6 +-
 .../apache/geode/internal/cache/DiskStoreImpl.java |   2 +-
 .../internal/cache/DistributedCacheOperation.java  |   4 +-
 .../geode/internal/cache/DistributedRegion.java    |   6 +-
 .../DistributedRegionFunctionStreamingMessage.java |   4 +-
 .../geode/internal/cache/EntryEventImpl.java       |   2 +-
 .../apache/geode/internal/cache/ExpiryTask.java    |   2 +-
 .../apache/geode/internal/cache/FilterProfile.java |   4 +-
 .../internal/cache/FindDurableQueueProcessor.java  |   2 +-
 .../geode/internal/cache/GemFireCacheImpl.java     |   8 +-
 .../apache/geode/internal/cache/GridAdvisor.java   |   8 +-
 .../apache/geode/internal/cache/LocalDataSet.java  |   2 +-
 .../apache/geode/internal/cache/LocalRegion.java   |  36 ++---
 .../cache/MemberFunctionStreamingMessage.java      |   4 +-
 .../org/apache/geode/internal/cache/Oplog.java     |  24 +--
 .../apache/geode/internal/cache/OverflowOplog.java |  10 +-
 .../geode/internal/cache/PRQueryProcessor.java     |   2 +-
 .../cache/PartitionRegionConfigValidator.java      |   6 +-
 .../geode/internal/cache/PartitionedRegion.java    |  40 ++---
 .../internal/cache/PartitionedRegionDataStore.java |   2 +-
 .../internal/cache/PartitionedRegionHelper.java    |   2 +-
 .../internal/cache/PlaceHolderDiskRegion.java      |   2 +-
 .../geode/internal/cache/ProxyRegionMap.java       |  82 +++++------
 .../geode/internal/cache/QueuedOperation.java      |   4 +-
 .../cache/SearchLoadAndWriteProcessor.java         |  10 +-
 .../geode/internal/cache/StateFlushOperation.java  |   4 +-
 .../geode/internal/cache/TXCommitMessage.java      |   4 +-
 .../apache/geode/internal/cache/TXEntryState.java  |  28 ++--
 .../geode/internal/cache/TXEntryUserAttrState.java |   2 +-
 .../apache/geode/internal/cache/TXManagerImpl.java |   6 +-
 .../org/apache/geode/internal/cache/TXMessage.java |   6 +-
 .../geode/internal/cache/TXReservationMgr.java     |   2 +-
 .../org/apache/geode/internal/cache/TXState.java   |   2 +-
 .../geode/internal/cache/UpdateOperation.java      |   2 +-
 .../geode/internal/cache/VMLRURegionMap.java       |   2 +-
 .../geode/internal/cache/ValidatingDiskRegion.java |   2 +-
 .../cache/eviction/MemoryLRUController.java        |   2 +-
 .../execute/FunctionStreamingResultCollector.java  |   6 +-
 .../internal/cache/locks/TXLockServiceImpl.java    |   4 +-
 .../locks/TXRecoverGrantorMessageProcessor.java    |   2 +-
 .../cache/partitioned/BucketSizeMessage.java       |   2 +-
 .../cache/partitioned/ContainsKeyValueMessage.java |   2 +-
 .../internal/cache/partitioned/DestroyMessage.java |   2 +-
 .../cache/partitioned/IndexCreationMsg.java        |   4 +-
 .../cache/partitioned/InvalidateMessage.java       |   2 +-
 .../PRFunctionStreamingResultCollector.java        |   4 +-
 .../cache/partitioned/PartitionMessage.java        |   8 +-
 .../internal/cache/partitioned/PutMessage.java     |   2 +-
 .../cache/partitioned/RemoveIndexesMessage.java    |   4 +-
 .../internal/cache/partitioned/SizeMessage.java    |   2 +-
 .../partitioned/StreamingPartitionOperation.java   |   2 +-
 .../cache/persistence/DiskInitFileParser.java      |   2 +-
 .../apache/geode/internal/cache/tier/Acceptor.java |   2 +-
 .../internal/cache/tier/sockets/AcceptorImpl.java  |   8 +-
 .../internal/cache/tier/sockets/BaseCommand.java   |   4 +-
 .../cache/tier/sockets/BaseCommandQuery.java       |   2 +-
 .../cache/tier/sockets/CacheClientNotifier.java    |   4 +-
 .../cache/tier/sockets/CacheClientUpdater.java     |   2 +-
 .../cache/tier/sockets/ChunkedMessage.java         |   2 +-
 .../cache/tier/sockets/ClientHealthMonitor.java    |   2 +-
 .../cache/tier/sockets/HAEventWrapper.java         |   2 +-
 .../geode/internal/cache/tier/sockets/Message.java |   8 +-
 .../cache/tier/sockets/ServerConnection.java       |   4 +-
 .../internal/cache/tx/RemoteDestroyMessage.java    |   2 +-
 .../geode/internal/cache/tx/RemotePutMessage.java  |   2 +-
 .../internal/cache/xmlcache/CacheCreation.java     |  10 +-
 .../cache/xmlcache/CacheServerCreation.java        |   4 +-
 .../geode/internal/cache/xmlcache/CacheXml.java    |   2 +-
 .../internal/cache/xmlcache/CacheXmlGenerator.java |  10 +-
 .../internal/cache/xmlcache/CacheXmlParser.java    |  64 ++++----
 .../xmlcache/DiskStoreAttributesCreation.java      |  18 +--
 .../cache/xmlcache/RegionAttributesCreation.java   |   6 +-
 .../internal/cache/xmlcache/RegionCreation.java    |   4 +-
 .../datasource/FacetsJCAConnectionManagerImpl.java |   4 +-
 .../datasource/JCAConnectionManagerImpl.java       |   6 +-
 .../apache/geode/internal/jndi/ContextImpl.java    |  24 +--
 .../apache/geode/internal/net/SocketCreator.java   |   4 +-
 .../geode/internal/statistics/ArchiveSplitter.java |  10 +-
 .../internal/statistics/GemFireStatSampler.java    |   2 +-
 .../geode/internal/statistics/HostStatHelper.java  |   8 +-
 .../internal/statistics/StatArchiveReader.java     |  14 +-
 .../internal/statistics/StatArchiveWriter.java     |   6 +-
 .../statistics/StatisticDescriptorImpl.java        |  12 +-
 .../geode/internal/statistics/StatisticsImpl.java  |   4 +-
 .../statistics/StatisticsTypeFactoryImpl.java      |   2 +-
 .../internal/statistics/StatisticsTypeImpl.java    |   4 +-
 .../internal/statistics/StatisticsTypeXml.java     |   6 +-
 .../org/apache/geode/internal/tcp/Connection.java  |  26 ++--
 .../org/apache/geode/internal/tcp/TCPConduit.java  |   2 +-
 .../UniversalMembershipListenerAdapter.java        |   2 +-
 .../cache/query/cq/dunit/CqQueryDUnitTest.java     |  12 +-
 .../query/cq/dunit/CqQueryUsingPoolDUnitTest.java  |  14 +-
 .../dunit/PartitionedRegionCqQueryDUnitTest.java   |  64 ++++----
 .../query/cq/dunit/PrCqUsingPoolDUnitTest.java     |  78 +++++-----
 .../cache/query/dunit/PdxQueryCQDUnitTest.java     |  38 ++---
 .../cache/query/dunit/PdxQueryCQTestBase.java      |  10 +-
 .../query/dunit/QueryIndexUpdateRIDUnitTest.java   |   4 +-
 .../cache/PRDeltaPropagationDUnitTest.java         |  14 +-
 .../geode/internal/cache/PutAllCSDUnitTest.java    | 130 ++++++++--------
 ...ltaToRegionRelationCQRegistrationDUnitTest.java |   4 +-
 .../management/CacheServerManagementDUnitTest.java |   2 +-
 .../java/batterytest/greplogs/LogConsumer.java     |   2 +-
 .../cache/client/internal/LocatorTestBase.java     |   2 +-
 .../apache/geode/cache30/ClientServerTestCase.java |   4 +-
 .../cli/commands/QueryCommandDUnitTestBase.java    |   2 +-
 .../cache/lucene/LuceneIndexCreationDUnitTest.java |   2 +-
 .../cache/lucene/internal/xml/LuceneXmlParser.java |   2 +-
 .../geode/internal/cache/wan/WANTestBase.java      |   4 +-
 .../cache/wan/misc/WanValidationsDUnitTest.java    |   6 +-
 .../wan/GatewaySenderEventRemoteDispatcher.java    |   8 +-
 .../cache/wan/GatewaySenderFactoryImpl.java        |   2 +-
 232 files changed, 1120 insertions(+), 1121 deletions(-)

diff --git a/geode-core/src/distributedTest/java/org/apache/geode/cache/ConnectionPoolDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/cache/ConnectionPoolDUnitTest.java
index a3d0237..beab932 100755
--- a/geode-core/src/distributedTest/java/org/apache/geode/cache/ConnectionPoolDUnitTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/cache/ConnectionPoolDUnitTest.java
@@ -92,7 +92,7 @@ import org.apache.geode.test.junit.categories.ClientServerTest;
 @FixMethodOrder(NAME_ASCENDING)
 public class ConnectionPoolDUnitTest extends JUnit4CacheTestCase {
 
-  /** The port on which the bridge server was started in this VM */
+  /** The port on which the cache server was started in this VM */
   private static int bridgeServerPort;
 
   protected static int port = 0;
@@ -154,7 +154,7 @@ public class ConnectionPoolDUnitTest extends JUnit4CacheTestCase {
   }
 
   /**
-   * Create a bridge server on the given port without starting it.
+   * Create a cache server on the given port without starting it.
    *
    * @since GemFire 5.0.2
    */
@@ -166,7 +166,7 @@ public class ConnectionPoolDUnitTest extends JUnit4CacheTestCase {
   }
 
   /**
-   * Starts a bridge server on the given port, using the given deserializeValues and
+   * Starts a cache server on the given port, using the given deserializeValues and
    * notifyBySubscription to serve up the given region.
    *
    * @since GemFire 4.0
@@ -205,7 +205,7 @@ public class ConnectionPoolDUnitTest extends JUnit4CacheTestCase {
   }
 
   /**
-   * Stops the bridge server that serves up the given cache.
+   * Stops the cache server that serves up the given cache.
    *
    * @since GemFire 4.0
    */
@@ -704,7 +704,7 @@ public class ConnectionPoolDUnitTest extends JUnit4CacheTestCase {
   }
 
   /**
-   * Tests for bug 36684 by having two bridge servers with cacheloaders that should always return a
+   * Tests for bug 36684 by having two cache servers with cacheloaders that should always return a
    * value and one client connected to each server reading values. If the bug exists, the clients
    * will get null sometimes.
    *
@@ -1052,7 +1052,7 @@ public class ConnectionPoolDUnitTest extends JUnit4CacheTestCase {
     VM vm1 = host.getVM(1);
     VM vm2 = host.getVM(2);
 
-    // Create two bridge servers
+    // Create two cache servers
     SerializableRunnable createCacheServer = new CacheSerializableRunnable("Create Cache Server") {
       public void run2() throws CacheException {
         AttributesFactory factory = getBridgeServerRegionAttributes(null, null);
@@ -1259,7 +1259,7 @@ public class ConnectionPoolDUnitTest extends JUnit4CacheTestCase {
 
     try {
 
-      // Create two bridge servers
+      // Create two cache servers
       SerializableRunnable createCacheServer =
           new CacheSerializableRunnable("Create Cache Server") {
             public void run2() throws CacheException {
@@ -4321,9 +4321,9 @@ public class ConnectionPoolDUnitTest extends JUnit4CacheTestCase {
 
   /**
    * Test dynamic region creation instantiated from a bridge client causing regions to be created on
-   * two different bridge servers.
+   * two different cache servers.
    *
-   * Also tests the reverse situation, a dynamic region is created on the bridge server expecting
+   * Also tests the reverse situation, a dynamic region is created on the cache server expecting
    * the same region to be created on the client.
    *
    * Note: This test re-creates Distributed Systems for its own purposes and uses a Loner
@@ -4358,7 +4358,7 @@ public class ConnectionPoolDUnitTest extends JUnit4CacheTestCase {
           try {
             startBridgeServer(0);
           } catch (IOException ugh) {
-            fail("Bridge Server startup failed");
+            fail("cache server startup failed");
           }
           AttributesFactory factory = new AttributesFactory();
           factory.setScope(Scope.DISTRIBUTED_ACK);
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/cache/client/internal/AutoConnectionSourceDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/cache/client/internal/AutoConnectionSourceDUnitTest.java
index aa17a91..98a3647 100644
--- a/geode-core/src/distributedTest/java/org/apache/geode/cache/client/internal/AutoConnectionSourceDUnitTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/cache/client/internal/AutoConnectionSourceDUnitTest.java
@@ -349,7 +349,7 @@ public class AutoConnectionSourceDUnitTest extends LocatorTestBase {
 
     String locators = getLocatorString(getServerHostName(), locatorPort);
 
-    // start a bridge server with a listener
+    // start a cache server with a listener
     addBridgeListener(bridge1VM);
     int serverPort1 =
         bridge1VM.invoke("Start BridgeServer", () -> startBridgeServer(null, locators));
@@ -362,7 +362,7 @@ public class AutoConnectionSourceDUnitTest extends LocatorTestBase {
     // wait for client to connect
     checkEndpoints(clientVM, serverPort1);
 
-    // make sure the client and bridge server both noticed each other
+    // make sure the client and cache server both noticed each other
     waitForJoin(bridge1VM);
     MyListener serverListener = getBridgeListener(bridge1VM);
     Assert.assertEquals(0, serverListener.getCrashes());
@@ -379,7 +379,7 @@ public class AutoConnectionSourceDUnitTest extends LocatorTestBase {
 
     checkEndpoints(clientVM, serverPort1);
 
-    // start another bridge server and make sure it is detected by the client
+    // start another cache server and make sure it is detected by the client
     int serverPort2 =
         bridge2VM.invoke("Start BridgeServer", () -> startBridgeServer(null, locators));
 
@@ -396,7 +396,7 @@ public class AutoConnectionSourceDUnitTest extends LocatorTestBase {
     Assert.assertEquals(1, clientListener.getJoins());
     resetBridgeListener(clientVM);
 
-    // stop the second bridge server and make sure it is detected by the client
+    // stop the second cache server and make sure it is detected by the client
     stopBridgeMemberVM(bridge2VM);
 
     checkEndpoints(clientVM, serverPort1);
@@ -411,7 +411,7 @@ public class AutoConnectionSourceDUnitTest extends LocatorTestBase {
     Assert.assertEquals(1, clientListener.getDepartures() + clientListener.getCrashes());
     resetBridgeListener(clientVM);
 
-    // stop the client and make sure the bridge server notices
+    // stop the client and make sure the cache server notices
     stopBridgeMemberVM(clientVM);
     waitForDeparture(bridge1VM);
     serverListener = getBridgeListener(bridge1VM);
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/cache/client/internal/LocatorLoadBalancingDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/cache/client/internal/LocatorLoadBalancingDUnitTest.java
index 4bfbbd5..513338a 100644
--- a/geode-core/src/distributedTest/java/org/apache/geode/cache/client/internal/LocatorLoadBalancingDUnitTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/cache/client/internal/LocatorLoadBalancingDUnitTest.java
@@ -62,7 +62,7 @@ public class LocatorLoadBalancingDUnitTest extends LocatorTestBase {
 
   /**
    * The number of connections that we can be off by in the balancing tests We need this little
-   * fudge factor, because the locator can receive an update from the bridge server after it has
+   * fudge factor, because the locator can receive an update from the cache server after it has
    * made incremented its counter for a client connection, but the client hasn't connected yet. This
    * wipes out the estimation on the locator. This means that we may be slighly off in our balance.
    * <p>
@@ -76,8 +76,8 @@ public class LocatorLoadBalancingDUnitTest extends LocatorTestBase {
   }
 
   /**
-   * Test the locator discovers a bridge server and is initialized with the correct load for that
-   * bridge server.
+   * Test the locator discovers a cache server and is initialized with the correct load for that
+   * cache server.
    */
   @Test
   public void testDiscovery() {
@@ -167,8 +167,8 @@ public class LocatorLoadBalancingDUnitTest extends LocatorTestBase {
   }
 
   /**
-   * Test to make sure the bridge servers communicate their updated load to the controller when the
-   * load on the bridge server changes.
+   * Test to make sure the cache servers communicate their updated load to the controller when the
+   * load on the cache server changes.
    *
    */
   @Test
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/cache/query/dunit/CompiledInDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/cache/query/dunit/CompiledInDUnitTest.java
index 24b88d3..ba9e95e 100644
--- a/geode-core/src/distributedTest/java/org/apache/geode/cache/query/dunit/CompiledInDUnitTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/cache/query/dunit/CompiledInDUnitTest.java
@@ -148,7 +148,7 @@ public class CompiledInDUnitTest extends JUnit4CacheTestCase {
     final String queryString =
         "select * from " + regName + " where getMapField['1'] in SET ($1,$2)";
 
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer();
         createReplicateRegion();
@@ -174,7 +174,7 @@ public class CompiledInDUnitTest extends JUnit4CacheTestCase {
     final String queryString =
         "select * from " + regName + " where getMapField['1'] in SET ($1,$2)";
 
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer();
         createReplicateRegion();
@@ -193,7 +193,7 @@ public class CompiledInDUnitTest extends JUnit4CacheTestCase {
     final int numExpectedResults = numberOfEntries / 2;
     final String queryString = "select * from " + regName + " where getMapField['1'] in SET ($1)";
 
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer();
         createReplicateRegion();
@@ -214,7 +214,7 @@ public class CompiledInDUnitTest extends JUnit4CacheTestCase {
     final int numExpectedResults = numberOfEntries / 2;
     final String queryString = "select * from " + regName + " where getMapField['1'] in SET ($1)";
 
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer();
         createReplicateRegion();
@@ -235,7 +235,7 @@ public class CompiledInDUnitTest extends JUnit4CacheTestCase {
     final String queryString =
         "select * from " + regName + " where getMapField['1'] in SET ($1,$2,$3)";
 
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer();
         createReplicateRegion();
@@ -257,7 +257,7 @@ public class CompiledInDUnitTest extends JUnit4CacheTestCase {
     final String queryString =
         "select * from " + regName + " where getMapField['1'] in SET ($1,$2)";
 
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer();
         createPartitionRegion(false);
@@ -278,7 +278,7 @@ public class CompiledInDUnitTest extends JUnit4CacheTestCase {
     final String queryString =
         "select * from " + regName + " where getMapField['1'] in SET ($1,$2)";
 
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer();
         createPartitionRegion(false);
@@ -297,7 +297,7 @@ public class CompiledInDUnitTest extends JUnit4CacheTestCase {
     final int numExpectedResults = numberOfEntries / 2;
     final String queryString = "select * from " + regName + " where getMapField['1'] in SET ($1)";
 
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer();
         createPartitionRegion(false);
@@ -317,7 +317,7 @@ public class CompiledInDUnitTest extends JUnit4CacheTestCase {
     final int numExpectedResults = numberOfEntries / 2;
     final String queryString = "select * from " + regName + " where getMapField['1'] in SET ($1)";
 
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer();
         createPartitionRegion(false);
@@ -338,7 +338,7 @@ public class CompiledInDUnitTest extends JUnit4CacheTestCase {
     final String queryString =
         "select * from " + regName + " where getMapField['1'] in SET ($1,$2,$3)";
 
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer();
         createPartitionRegion(false);
@@ -359,7 +359,7 @@ public class CompiledInDUnitTest extends JUnit4CacheTestCase {
 
     final String queryString = "select * from " + regName + " where $1 in SET (getMapField['1'])";
 
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer();
         createReplicateRegion();
@@ -382,7 +382,7 @@ public class CompiledInDUnitTest extends JUnit4CacheTestCase {
     final String queryString =
         "select * from " + regName + " where $1 in SET (getMapField['1'], getMapField['0'])";
 
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer();
         createReplicateRegion();
@@ -405,7 +405,7 @@ public class CompiledInDUnitTest extends JUnit4CacheTestCase {
     final String queryString = "select * from " + regName
         + " where getMapField['1'] in SET (getMapField['1'], getMapField['2'], 'asdfasdf', getMapField['0'])";
 
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer();
         createReplicateRegion();
@@ -427,7 +427,7 @@ public class CompiledInDUnitTest extends JUnit4CacheTestCase {
     final String queryString = "select * from " + regName
         + " where getMapField['1'] in SET ($1, $2) AND getMapField['1'] in SET($3)";
 
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer();
         createReplicateRegion();
@@ -449,7 +449,7 @@ public class CompiledInDUnitTest extends JUnit4CacheTestCase {
     final String queryString = "select * from " + regName
         + " where getMapField['1'] in SET ($1, $2) AND getMapField['2'] in SET($3)";
 
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer();
         createReplicateRegion();
@@ -478,7 +478,7 @@ public class CompiledInDUnitTest extends JUnit4CacheTestCase {
     final String queryString =
         "select * from " + regName + " where getMapField['1'] in SET($1, $1, $1)";
 
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer();
         createReplicateRegion();
@@ -500,7 +500,7 @@ public class CompiledInDUnitTest extends JUnit4CacheTestCase {
     final String queryString = "select * from " + regName
         + " where getMapField['1'] in SET ($1) OR getMapField['0'] in SET($2)";
 
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer();
         createReplicateRegion();
@@ -522,7 +522,7 @@ public class CompiledInDUnitTest extends JUnit4CacheTestCase {
     final String queryString =
         "select * from " + regName + " where getMapField['1'] in SET ($1,$2)";
 
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer();
         createPartitionRegion(false);
@@ -543,7 +543,7 @@ public class CompiledInDUnitTest extends JUnit4CacheTestCase {
     final String queryString =
         "select * from " + regName + " where getMapField['1'] in SET ($1,$2)";
 
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer();
         createPartitionRegion(false);
@@ -562,7 +562,7 @@ public class CompiledInDUnitTest extends JUnit4CacheTestCase {
     final int numExpectedResults = numberOfEntries / 2;
     final String queryString = "select * from " + regName + " where getMapField['1'] in SET ($1)";
 
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer();
         createPartitionRegion(false);
@@ -582,7 +582,7 @@ public class CompiledInDUnitTest extends JUnit4CacheTestCase {
     final int numExpectedResults = numberOfEntries / 2;
     final String queryString = "select * from " + regName + " where getMapField['1'] in SET ($1)";
 
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer();
         createPartitionRegion(false);
@@ -603,7 +603,7 @@ public class CompiledInDUnitTest extends JUnit4CacheTestCase {
     final String queryString =
         "select * from " + regName + " where getMapField['1'] in SET ($1,$2,$3)";
 
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer();
         createPartitionRegion(false);
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/cache/query/dunit/PDXQueryTestBase.java b/geode-core/src/distributedTest/java/org/apache/geode/cache/query/dunit/PDXQueryTestBase.java
index 47036df..dd976b5 100644
--- a/geode-core/src/distributedTest/java/org/apache/geode/cache/query/dunit/PDXQueryTestBase.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/cache/query/dunit/PDXQueryTestBase.java
@@ -57,7 +57,7 @@ import org.apache.geode.test.dunit.cache.internal.JUnit4CacheTestCase;
 
 public abstract class PDXQueryTestBase extends JUnit4CacheTestCase {
 
-  /** The port on which the bridge server was started in this VM */
+  /** The port on which the cache server was started in this VM */
   private static int bridgeServerPort;
   protected static final Compressor compressor = SnappyCompressor.getDefaultInstance();
   protected final String rootRegionName = "root";
@@ -92,7 +92,7 @@ public abstract class PDXQueryTestBase extends JUnit4CacheTestCase {
     final Host host = Host.getHost(0);
     for (int i = 0; i < 4; i++) {
       VM vm = host.getVM(i);
-      vm.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+      vm.invoke(new CacheSerializableRunnable("Create cache server") {
         public void run2() throws CacheException {
           TestObject.numInstance = 0;
           PortfolioPdx.numInstance = 0;
@@ -251,7 +251,7 @@ public abstract class PDXQueryTestBase extends JUnit4CacheTestCase {
   }
 
   /**
-   * Starts a bridge server on the given port, using the given deserializeValues and
+   * Starts a cache server on the given port, using the given deserializeValues and
    * notifyBySubscription to serve up the given region.
    */
   protected void startBridgeServer(int port, boolean notifyBySubscription) throws IOException {
@@ -264,7 +264,7 @@ public abstract class PDXQueryTestBase extends JUnit4CacheTestCase {
   }
 
   /**
-   * Stops the bridge server that serves up the given cache.
+   * Stops the cache server that serves up the given cache.
    */
   protected void stopBridgeServer(Cache cache) {
     CacheServer bridge = (CacheServer) cache.getCacheServers().iterator().next();
@@ -287,7 +287,7 @@ public abstract class PDXQueryTestBase extends JUnit4CacheTestCase {
   }
 
   /**
-   * Starts a bridge server on the given port, using the given deserializeValues and
+   * Starts a cache server on the given port, using the given deserializeValues and
    * notifyBySubscription to serve up the given region.
    */
   protected void startCacheServer(int port, boolean notifyBySubscription) throws IOException {
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/cache/query/dunit/PdxLocalQueryDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/cache/query/dunit/PdxLocalQueryDUnitTest.java
index 3e0e604..cf96547 100644
--- a/geode-core/src/distributedTest/java/org/apache/geode/cache/query/dunit/PdxLocalQueryDUnitTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/cache/query/dunit/PdxLocalQueryDUnitTest.java
@@ -911,7 +911,7 @@ public class PdxLocalQueryDUnitTest extends PDXQueryTestBase {
     final Host host = Host.getHost(0);
     for (int i = 0; i < 4; i++) {
       VM vm = host.getVM(i);
-      vm.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+      vm.invoke(new CacheSerializableRunnable("Create cache server") {
         public void run2() throws CacheException {
           TestObject.numInstance = 0;
           PortfolioPdx.numInstance = 0;
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/cache/query/dunit/PdxQueryDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/cache/query/dunit/PdxQueryDUnitTest.java
index 59e1bee..ac15dbb 100644
--- a/geode-core/src/distributedTest/java/org/apache/geode/cache/query/dunit/PdxQueryDUnitTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/cache/query/dunit/PdxQueryDUnitTest.java
@@ -93,7 +93,7 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
     final int numberOfEntries = 5;
 
     // Start server1
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer();
         Region region = getRootRegion().getSubregion(regionName);
@@ -105,7 +105,7 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
 
 
     // Start server2
-    vm1.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm1.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer();
         Region region = getRootRegion().getSubregion(regionName);
@@ -159,7 +159,7 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
     final int numberOfEntries = 10;
 
     // Start server1
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer();
         Region region = getRootRegion().getSubregion(regionName);
@@ -177,7 +177,7 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
     });
 
     // Start server2
-    vm1.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm1.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer();
         Region region = getRootRegion().getSubregion(regionName);
@@ -222,7 +222,7 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
 
     // Check for TestObject instances on Server2.
     // It should be 0
-    vm1.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm1.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         assertEquals(0, TestObject.numInstance);
       }
@@ -253,7 +253,7 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
     final int numberOfEntries = 10;
 
     // Start server1
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer(false, false, false, compressor);
         Region region = getRootRegion().getSubregion(regionName);
@@ -265,7 +265,7 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
     });
 
     // Start server2
-    vm1.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm1.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer();
         Region region = getRootRegion().getSubregion(regionName);
@@ -314,7 +314,7 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
 
     // Check for TestObject instances on Server2.
     // It should be 0
-    vm1.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm1.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         assertEquals(0, TestObject.numInstance);
       }
@@ -340,7 +340,7 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
     final int numberOfEntries = 10;
 
     // Start server1
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer();
         Region region = getRootRegion().getSubregion(regionName);
@@ -361,7 +361,7 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
     });
 
     // Start server2
-    vm1.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm1.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer();
         Region region = getRootRegion().getSubregion(regionName);
@@ -408,7 +408,7 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
 
     // Check for TestObject instances on Server2.
     // It should be 0
-    vm1.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm1.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         assertEquals(0, TestObject.numInstance);
       }
@@ -434,7 +434,7 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
     final int numberOfEntries = 10;
 
     // Start server1
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer();
         Region region = getRootRegion().getSubregion(regionName);
@@ -445,7 +445,7 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
     });
 
     // Start server2
-    vm1.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm1.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer();
         Region region = getRootRegion().getSubregion(regionName);
@@ -538,7 +538,7 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
 
     // Check for TestObject instances on Server2.
     // It should be 0
-    vm1.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm1.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         assertEquals(0, TestObject.numInstance);
       }
@@ -586,7 +586,7 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
         "SELECT p FROM " + regName + " p WHERE p.status != 'active'",};
 
     // Start server1
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer(false, false, true, null); // Async index
         Region region = getRootRegion().getSubregion(regionName);
@@ -604,7 +604,7 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
     });
 
     // Start server2
-    vm1.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm1.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer(false, false, true, null); // Async index
         Region region = getRootRegion().getSubregion(regionName);
@@ -643,7 +643,7 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
 
 
     // Execute query and make sure there is no PdxInstance in the results.
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         // Execute query locally.
         QueryService queryService = getCache().getQueryService();
@@ -675,7 +675,7 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
     });
 
     // Re-execute query to fetch PdxInstance in the results.
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         // Execute query locally.
         GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
@@ -734,7 +734,7 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
     final String queryStr = "SELECT COUNT(*) FROM " + regName + " WHERE id >= 0";
 
     // Start server1
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer();
         Region region = getRootRegion().getSubregion(regionName);
@@ -745,7 +745,7 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
     });
 
     // Start server2
-    vm1.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm1.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer();
         Region region = getRootRegion().getSubregion(regionName);
@@ -834,7 +834,7 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
 
     // Check for TestObject instances on Server2.
     // It should be 0
-    vm1.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm1.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         assertEquals(0, TestObject.numInstance);
       }
@@ -890,7 +890,7 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
 
 
     // Start server1
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer();
         Region region = getRootRegion().getSubregion(regionName);
@@ -898,7 +898,7 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
     });
 
     // Start server2
-    vm1.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm1.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer();
         Region region = getRootRegion().getSubregion(regionName);
@@ -1000,7 +1000,7 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
     final int numberOfEntries = 10;
 
     // Start server1
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer();
         Region region = getRootRegion().getSubregion(regionName);
@@ -1014,7 +1014,7 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
     });
 
     // Start server2
-    vm1.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm1.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer();
         Region region = getRootRegion().getSubregion(regionName);
@@ -1116,7 +1116,7 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
 
     // Check for TestObject instances on Server2.
     // It should be 0
-    vm1.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm1.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         assertEquals(0, TestObject.numInstance);
       }
@@ -1141,26 +1141,26 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
     VM vm3 = host.getVM(3);
     final int numberOfEntries = 100;
 
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer(true, true);
       }
     });
 
-    vm1.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm1.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer(true, false);
       }
     });
 
-    vm2.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm2.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer(true, false);
       }
     });
 
     // Load region.
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         Region region = getRootRegion().getSubregion(regionName);
         for (int i = 0; i < numberOfEntries; i++) {
@@ -1204,7 +1204,7 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
     });
 
     // Check for TestObject instances.
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         assertEquals(numberOfEntries, TestObject.numInstance);
       }
@@ -1212,7 +1212,7 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
 
     // Check for TestObject instances.
     // It should be 0
-    vm1.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm1.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         assertEquals(0, TestObject.numInstance);
       }
@@ -1220,14 +1220,14 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
 
     // Check for TestObject instances.
     // It should be 0
-    vm2.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm2.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         assertEquals(0, TestObject.numInstance);
       }
     });
 
     // Execute Query on Server2.
-    vm1.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm1.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         QueryService qs = getCache().getQueryService();
         Query query = null;
@@ -1259,7 +1259,7 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
 
     // Check for TestObject instances.
     // It should be 0
-    vm2.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm2.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         assertEquals(0, TestObject.numInstance);
       }
@@ -1285,25 +1285,25 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
     VM vm3 = host.getVM(3);
     final int numberOfEntries = 100;
 
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer(true, true);
       }
     });
 
-    vm1.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm1.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer(true, false);
       }
     });
 
-    vm2.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm2.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer(true, false);
       }
     });
 
-    vm3.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm3.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer(true, false);
       }
@@ -1311,7 +1311,7 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
 
 
     // Load region using class loader and execute query on the same thread.
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         Region region = getRootRegion().getSubregion(regionName);
         try {
@@ -1368,25 +1368,25 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
     VM vm3 = host.getVM(3);
     final int numberOfEntries = 100;
 
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer(true, true);
       }
     });
 
-    vm1.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm1.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer(true, false);
       }
     });
 
-    vm2.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm2.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer(true, false);
       }
     });
 
-    vm3.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm3.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer(true, false);
       }
@@ -1394,7 +1394,7 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
 
 
     // Load region using class loader and execute query on the same thread.
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         Region region = getRootRegion().getSubregion(regionName);
         try {
@@ -1437,7 +1437,7 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
             + " p, p.positions.values pos WHERE p.ID > 2 or pos.secId = 'vmware'",};
 
     // Execute query on node without class and with pdxReadSerialized.
-    vm1.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm1.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         Region region = getRootRegion().getSubregion(regionName);
         GemFireCacheImpl c = (GemFireCacheImpl) region.getCache();
@@ -1496,7 +1496,7 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
     final int numberOfEntries = 10;
 
     // Start server1
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer();
         Region region = getRootRegion().getSubregion(regionName);
@@ -1522,7 +1522,7 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
     });
 
     // Start server2
-    vm1.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm1.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer();
         Region region = getRootRegion().getSubregion(regionName);
@@ -1572,13 +1572,13 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
     vm2.invoke(createClientRegions);
     vm3.invoke(createClientRegions);
 
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         assertEquals(numberOfEntries, TestObject.numInstance);
       }
     });
 
-    vm1.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm1.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         assertEquals(0, TestObject.numInstance);
       }
@@ -1649,7 +1649,7 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
     vm2.invoke(executeQueries);
     vm3.invoke(executeQueries);
 
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         assertEquals(numberOfEntries, TestObject.numInstance);
       }
@@ -1657,7 +1657,7 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
 
     // Check for TestObject instances on Server2.
     // It should be 0
-    vm1.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm1.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         assertEquals(0, TestObject.numInstance);
       }
@@ -1689,7 +1689,7 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
         "SELECT * FROM " + this.regName + ".values p WHERE p.ticker = 'vmware'",};
 
     // Start server1
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer();
         Region region = getRootRegion().getSubregion(regionName);
@@ -1700,7 +1700,7 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
     });
 
     // Start server2
-    vm1.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm1.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer();
         Region region = getRootRegion().getSubregion(regionName);
@@ -1772,7 +1772,7 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
 
     vm3.invoke(executeQueries);
 
-    vm1.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm1.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         assertEquals(0, TestObject.numInstance);
       }
@@ -1780,7 +1780,7 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
 
 
     // Create index
-    vm1.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm1.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         Region region = getRootRegion().getSubregion(regionName);
         QueryService qs = getCache().getQueryService();
@@ -1798,7 +1798,7 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
 
     // Check for TestObject instances.
     // It should be 0
-    vm1.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm1.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         assertEquals(0, TestObject.numInstance);
       }
@@ -1844,7 +1844,7 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
             + " coll2, pf2.positions.values posit2 WHERE posit1.secId='IBM' AND posit2.secId='IBM'",};
 
     // Start server1
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer();
         Region region1 = getRootRegion().getSubregion(regionName);
@@ -1858,7 +1858,7 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
     });
 
     // Start server2
-    vm1.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm1.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer();
         Region region = getRootRegion().getSubregion(regionName);
@@ -1933,7 +1933,7 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
 
     vm3.invoke(executeQueries);
 
-    vm1.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm1.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         assertEquals(0, PortfolioPdx.numInstance);
         assertEquals(0, PositionPdx.numInstance);
@@ -1942,7 +1942,7 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
 
 
     // Create index
-    vm1.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm1.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         Region region = getRootRegion().getSubregion(regionName);
         QueryService qs = getCache().getQueryService();
@@ -1968,7 +1968,7 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
     // PorfolioPdx object
     // but there is a method getPk(), so for #44436, the objects are
     // deserialized to get the value in vm1
-    vm1.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm1.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         assertEquals(numberOfEntries, PortfolioPdx.numInstance);
         assertEquals(325, PositionPdx.numInstance); // 50 PorforlioPdx objects
@@ -2018,7 +2018,7 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
             + " coll2, pf2.positions.values posit2 WHERE posit1.secId='IBM' AND posit2.secId='IBM'",};
 
     // Start server1
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer(false, true);
         Region region1 = getRootRegion().getSubregion(regionName);
@@ -2027,7 +2027,7 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
     });
 
     // Start server2
-    vm1.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm1.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer(false, true);
         Region region = getRootRegion().getSubregion(regionName);
@@ -2036,7 +2036,7 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
     });
 
     // Start server2
-    vm2.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm2.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer(false, true);
         Region region = getRootRegion().getSubregion(regionName);
@@ -2111,7 +2111,7 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
 
     vm3.invoke(executeQueries);
 
-    vm1.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm1.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         assertEquals(0, PortfolioPdx.numInstance);
         assertEquals(0, PositionPdx.numInstance);
@@ -2120,7 +2120,7 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
 
 
     // Create index
-    vm1.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm1.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         Region region = getRootRegion().getSubregion(regionName);
         QueryService qs = getCache().getQueryService();
@@ -2144,7 +2144,7 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
 
     // Check for TestObject instances.
     // It should be 0
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         assertEquals(0, PortfolioPdx.numInstance);
         assertEquals(0, PositionPdx.numInstance);
@@ -2155,7 +2155,7 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
     // PorfolioPdx object
     // but there is a method getPk(), so for #44436, the objects are
     // deserialized to get the value in vm1
-    vm1.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm1.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         assertEquals(numberOfEntries, PortfolioPdx.numInstance);
         // 50 PorforlioPdx objects create (50*3)+50+50+50+25 = 325 PositionPdx
@@ -2164,7 +2164,7 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
       }
     });
 
-    vm2.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm2.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         assertEquals(0, PortfolioPdx.numInstance);
         assertEquals(0, PositionPdx.numInstance);
@@ -2194,7 +2194,7 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
     final String queryStr =
         "SELECT DISTINCT * FROM " + this.regName + " pf where pf.ID > 2 and pf.ID < 10";
     // Start server1
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer();
         Region region = getRootRegion().getSubregion(regionName);
@@ -2202,7 +2202,7 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
     });
 
     // Start server2
-    vm1.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm1.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer();
         Region region = getRootRegion().getSubregion(regionName);
@@ -2285,7 +2285,7 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
 
     // Check for TestObject instances on Server2.
     // It should be 0
-    vm1.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm1.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         assertEquals(0, PortfolioPdx.numInstance);
       }
@@ -2355,7 +2355,7 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
 
 
     // Start server1
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer();
         Region region = getRootRegion().getSubregion(regionName);
@@ -2363,7 +2363,7 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
     });
 
     // Start server2
-    vm1.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm1.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer();
         Region region = getRootRegion().getSubregion(regionName);
@@ -2449,7 +2449,7 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
     config.setProperty("locators", "localhost[" + DistributedTestUtils.getDUnitLocatorPort() + "]");
 
     // Start server
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         Cache cache = new CacheFactory(config).create();
         AttributesFactory factory = new AttributesFactory();
@@ -2472,7 +2472,7 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
     });
 
     // Start server
-    vm1.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm1.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         Cache cache = new CacheFactory(config).create();
         AttributesFactory factory = new AttributesFactory();
@@ -2489,7 +2489,7 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
     });
 
     // Start server
-    vm2.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm2.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         Cache cache = new CacheFactory(config).create();
         AttributesFactory factory = new AttributesFactory();
@@ -3465,7 +3465,7 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
     VM vm1 = host.getVM(1);
 
     // Start server1
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer();
         Region region = getCache().createRegionFactory(RegionShortcut.REPLICATE).create("testJson");
@@ -3480,7 +3480,7 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
 
 
     // Start server2
-    vm1.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm1.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer();
         getCache().createRegionFactory(RegionShortcut.REPLICATE).create("testJson");
@@ -3516,7 +3516,7 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
     VM vm1 = host.getVM(1);
 
     // Start server1
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer();
         Region region = getCache().createRegionFactory(RegionShortcut.REPLICATE).create("testJson");
@@ -3531,7 +3531,7 @@ public class PdxQueryDUnitTest extends PDXQueryTestBase {
 
 
     // Start server2
-    vm1.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm1.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer();
         getCache().createRegionFactory(RegionShortcut.REPLICATE).create("testJson");
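
For context on the "testJson" hunks above: the test stores JSON documents in a REPLICATE region as PDX and queries them with OQL. Below is a minimal standalone sketch of that flow, assuming an embedded loner cache; the document contents, key and query are illustrative and not taken from this commit.

    import org.apache.geode.cache.Cache;
    import org.apache.geode.cache.CacheFactory;
    import org.apache.geode.cache.Region;
    import org.apache.geode.cache.RegionShortcut;
    import org.apache.geode.cache.query.SelectResults;
    import org.apache.geode.pdx.JSONFormatter;
    import org.apache.geode.pdx.PdxInstance;

    public class JsonPdxQuerySketch {
      public static void main(String[] args) throws Exception {
        // Loner member: no locators, no multicast.
        Cache cache = new CacheFactory().set("mcast-port", "0").set("locators", "").create();
        Region<String, PdxInstance> region =
            cache.<String, PdxInstance>createRegionFactory(RegionShortcut.REPLICATE).create("testJson");

        // JSONFormatter turns the document into a PdxInstance, so OQL can address its fields.
        region.put("jsondoc1", JSONFormatter.fromJSON("{\"name\": \"Joe\", \"age\": 42}"));

        SelectResults<?> results = (SelectResults<?>) cache.getQueryService()
            .newQuery("SELECT d.name FROM /testJson d WHERE d.age > 30")
            .execute();
        System.out.println(results.asList()); // expected to contain "Joe"
        cache.close();
      }
    }
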
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/cache/query/dunit/PdxStringQueryDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/cache/query/dunit/PdxStringQueryDUnitTest.java
index 3a87a80..2c1e16e 100644
--- a/geode-core/src/distributedTest/java/org/apache/geode/cache/query/dunit/PdxStringQueryDUnitTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/cache/query/dunit/PdxStringQueryDUnitTest.java
@@ -328,7 +328,7 @@ public class PdxStringQueryDUnitTest extends JUnit4CacheTestCase {
     client.invoke(executeQueries);
 
     // Put Non Pdx objects on server execute queries locally
-    server0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    server0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         Region region = getRootRegion().getSubregion(regionName);
 
@@ -367,7 +367,7 @@ public class PdxStringQueryDUnitTest extends JUnit4CacheTestCase {
     });
 
     // test for readSerialized flag
-    server0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    server0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
         cache.setReadSerializedForTest(true);
@@ -570,7 +570,7 @@ public class PdxStringQueryDUnitTest extends JUnit4CacheTestCase {
     client.invoke(executeQueries);
 
     // Put Non Pdx objects on server execute queries locally
-    server0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    server0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         Region region = getRootRegion().getSubregion(regionName);
 
@@ -599,7 +599,7 @@ public class PdxStringQueryDUnitTest extends JUnit4CacheTestCase {
     });
 
     // test for readSerialized flag
-    server0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    server0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
         cache.setReadSerializedForTest(true);
@@ -794,7 +794,7 @@ public class PdxStringQueryDUnitTest extends JUnit4CacheTestCase {
     client.invoke(executeQueries);
 
     // Put Non Pdx objects on server execute queries locally
-    server0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    server0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         Region region = getRootRegion().getSubregion(regionName);
 
@@ -822,7 +822,7 @@ public class PdxStringQueryDUnitTest extends JUnit4CacheTestCase {
       }
     });
     // test for readSerialized flag
-    server0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    server0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
         cache.setReadSerializedForTest(true);
@@ -1068,7 +1068,7 @@ public class PdxStringQueryDUnitTest extends JUnit4CacheTestCase {
     client.invoke(executeQueries);
 
     // Put Non Pdx objects on server execute queries locally
-    server0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    server0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         Region region = getRootRegion().getSubregion(regionName);
 
@@ -1107,7 +1107,7 @@ public class PdxStringQueryDUnitTest extends JUnit4CacheTestCase {
     });
 
     // test for readSerialized flag
-    server0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    server0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
         cache.setReadSerializedForTest(true);
@@ -1321,7 +1321,7 @@ public class PdxStringQueryDUnitTest extends JUnit4CacheTestCase {
 
     client.invoke(executeQueries);
     // Put Non Pdx objects on server execute queries locally
-    server0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    server0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         Region region = getRootRegion().getSubregion(regionName);
 
@@ -1350,7 +1350,7 @@ public class PdxStringQueryDUnitTest extends JUnit4CacheTestCase {
     });
 
     // test for readSerialized flag
-    server0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    server0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
         cache.setReadSerializedForTest(true);
@@ -1566,7 +1566,7 @@ public class PdxStringQueryDUnitTest extends JUnit4CacheTestCase {
     client.invoke(executeQueries);
 
     // Put Non Pdx objects on server execute queries locally
-    server0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    server0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         Region region = getRootRegion().getSubregion(regionName);
 
@@ -1595,7 +1595,7 @@ public class PdxStringQueryDUnitTest extends JUnit4CacheTestCase {
     });
 
     // test for readSerialized flag
-    server0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    server0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
         cache.setReadSerializedForTest(true);
@@ -2044,7 +2044,7 @@ public class PdxStringQueryDUnitTest extends JUnit4CacheTestCase {
   }
 
   /**
-   * Starts a bridge server on the given port, using the given deserializeValues and
+   * Starts a cache server on the given port, using the given deserializeValues and
    * notifyBySubscription to serve up the given region.
    */
   protected void startBridgeServer(int port, boolean notifyBySubscription) throws IOException {
@@ -2057,7 +2057,7 @@ public class PdxStringQueryDUnitTest extends JUnit4CacheTestCase {
   }
 
   /**
-   * Stops the bridge server that serves up the given cache.
+   * Stops the cache server that serves up the given cache.
    */
   protected void stopBridgeServer(Cache cache) {
     CacheServer server = (CacheServer) cache.getCacheServers().iterator().next();
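
The startBridgeServer/stopBridgeServer helpers whose javadoc is reworded above wrap the CacheServer API. A minimal sketch of that lifecycle, assuming an already-created Cache; the helper names below are illustrative, not the test's own.

    import java.io.IOException;
    import org.apache.geode.cache.Cache;
    import org.apache.geode.cache.server.CacheServer;

    public class CacheServerLifecycleSketch {

      /** Starts a cache server on the given port; port 0 lets the server pick an ephemeral port. */
      static CacheServer startCacheServer(Cache cache, int port, boolean notifyBySubscription)
          throws IOException {
        CacheServer server = cache.addCacheServer();
        server.setPort(port);
        server.setNotifyBySubscription(notifyBySubscription);
        server.start();
        return server;
      }

      /** Stops the first cache server hosted by the given cache. */
      static void stopCacheServer(Cache cache) {
        CacheServer server = cache.getCacheServers().iterator().next();
        server.stop();
      }
    }
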
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/cache/query/dunit/QueryUsingPoolDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/cache/query/dunit/QueryUsingPoolDUnitTest.java
index 38b6e64..05986ba 100644
--- a/geode-core/src/distributedTest/java/org/apache/geode/cache/query/dunit/QueryUsingPoolDUnitTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/cache/query/dunit/QueryUsingPoolDUnitTest.java
@@ -76,7 +76,7 @@ public class QueryUsingPoolDUnitTest extends JUnit4CacheTestCase {
   private static final Logger logger = LogService.getLogger();
 
   /**
-   * The port on which the bridge server was started in this VM
+   * The port on which the cache server was started in this VM
    */
   private static int bridgeServerPort;
 
@@ -158,7 +158,7 @@ public class QueryUsingPoolDUnitTest extends JUnit4CacheTestCase {
     final int numberOfEntries = 100;
 
     // Start server
-    vm0.invoke("Create Bridge Server", () -> {
+    vm0.invoke("Create cache server", () -> {
       createAndStartBridgeServer();
       AttributesFactory factory = new AttributesFactory();
       factory.setScope(Scope.LOCAL);
@@ -166,7 +166,7 @@ public class QueryUsingPoolDUnitTest extends JUnit4CacheTestCase {
     });
 
     // Initialize server region
-    vm0.invoke("Create Bridge Server", () -> {
+    vm0.invoke("Create cache server", () -> {
       Region region = getRootRegion().getSubregion(name);
       for (int i = 0; i < numberOfEntries; i++) {
         region.put("key-" + i, new TestObject(i, "ibm"));
@@ -289,7 +289,7 @@ public class QueryUsingPoolDUnitTest extends JUnit4CacheTestCase {
     final int numberOfEntries = 100;
 
     // Start server
-    final int port = vm0.invoke("Create Bridge Server", () -> {
+    final int port = vm0.invoke("Create cache server", () -> {
       setupBridgeServerAndCreateData(name, numberOfEntries);
       return getCacheServerPort();
     });
@@ -425,7 +425,7 @@ public class QueryUsingPoolDUnitTest extends JUnit4CacheTestCase {
     final int numberOfEntries = 100;
 
     // Start server
-    vm0.invoke("Create Bridge Server", () -> {
+    vm0.invoke("Create cache server", () -> {
       createAndStartBridgeServer();
       AttributesFactory factory = new AttributesFactory();
       factory.setScope(Scope.LOCAL);
@@ -433,7 +433,7 @@ public class QueryUsingPoolDUnitTest extends JUnit4CacheTestCase {
     });
 
     // Initialize server region
-    vm0.invoke("Create Bridge Server", () -> {
+    vm0.invoke("Create cache server", () -> {
       Region region = getRootRegion().getSubregion(name);
       for (int i = 0; i < numberOfEntries; i++) {
         region.put("key-" + i, new TestObject(i, "ibm"));
@@ -617,7 +617,7 @@ public class QueryUsingPoolDUnitTest extends JUnit4CacheTestCase {
     assertNotNull(this.regionName);
 
     // Start server
-    final int port = vm0.invoke("Create Bridge Server", () -> {
+    final int port = vm0.invoke("Create cache server", () -> {
       setupBridgeServerAndCreateData(regionName, numberOfEntries);
       return getCacheServerPort();
     });
@@ -730,13 +730,13 @@ public class QueryUsingPoolDUnitTest extends JUnit4CacheTestCase {
     };
 
     // Start server1
-    final int port0 = vm0.invoke("Create Bridge Server", () -> {
+    final int port0 = vm0.invoke("Create cache server", () -> {
       setupBridgeServerAndCreateData(regionName, numberOfEntries);
       return getCacheServerPort();
     });
 
     // Start server2
-    final int port1 = vm1.invoke("Create Bridge Server", () -> {
+    final int port1 = vm1.invoke("Create cache server", () -> {
       setupBridgeServerAndCreateData(regionName, numberOfEntries);
       return getCacheServerPort();
     });
@@ -847,7 +847,7 @@ public class QueryUsingPoolDUnitTest extends JUnit4CacheTestCase {
     };
 
     // Start server
-    final int port = vm0.invoke("Create Bridge Server", () -> {
+    final int port = vm0.invoke("Create cache server", () -> {
       setupBridgeServerAndCreateData(name, numberOfEntries);
       return getCacheServerPort();
     });
@@ -908,7 +908,7 @@ public class QueryUsingPoolDUnitTest extends JUnit4CacheTestCase {
     };
 
     // Start server
-    final int port = vm0.invoke("Create Bridge Server", () -> {
+    final int port = vm0.invoke("Create cache server", () -> {
       setupBridgeServerAndCreateData(name, numberOfEntries);
       QueryService queryService = getCache().getQueryService();
       queryService.newQuery("Select * from " + regName);
@@ -990,7 +990,7 @@ public class QueryUsingPoolDUnitTest extends JUnit4CacheTestCase {
     };
 
     // Start server
-    final int port = vm0.invoke("Create Bridge Server", () -> {
+    final int port = vm0.invoke("Create cache server", () -> {
       setupBridgeServerAndCreateData(name, numberOfEntries);
       QueryService queryService = getCache().getQueryService();
       queryService.newQuery("Select * from " + regName);
@@ -1092,7 +1092,7 @@ public class QueryUsingPoolDUnitTest extends JUnit4CacheTestCase {
         };
 
     // Start server1
-    final int port0 = vm0.invoke("Create Bridge Server", () -> {
+    final int port0 = vm0.invoke("Create cache server", () -> {
       setupBridgeServerAndCreateData(regionName, numberOfEntries);
       return getCacheServerPort();
     });
@@ -1203,7 +1203,7 @@ public class QueryUsingPoolDUnitTest extends JUnit4CacheTestCase {
     final int numberOfEntries = 100;
 
     // Start server
-    vm0.invoke("Create Bridge Server", () -> {
+    vm0.invoke("Create cache server", () -> {
       createAndStartBridgeServer();
       AttributesFactory factory = new AttributesFactory();
       factory.setScope(Scope.LOCAL);
@@ -1212,7 +1212,7 @@ public class QueryUsingPoolDUnitTest extends JUnit4CacheTestCase {
     });
 
     // Initialize server region
-    vm0.invoke("Create Bridge Server", () -> {
+    vm0.invoke("Create cache server", () -> {
       Region region1 = getRootRegion().getSubregion(name + "1");
       for (int i = 0; i < numberOfEntries; i++) {
         region1.put("key-" + i, new TestObject(i, "ibm"));
@@ -1291,7 +1291,7 @@ public class QueryUsingPoolDUnitTest extends JUnit4CacheTestCase {
     final int numberOfEntries = 100;
 
     // Start server
-    vm0.invoke("Create Bridge Server", () -> {
+    vm0.invoke("Create cache server", () -> {
       createAndStartBridgeServer();
       AttributesFactory factory = new AttributesFactory();
       factory.setScope(Scope.LOCAL);
@@ -1299,7 +1299,7 @@ public class QueryUsingPoolDUnitTest extends JUnit4CacheTestCase {
     });
 
     // Initialize server region
-    vm0.invoke("Create Bridge Server", () -> {
+    vm0.invoke("Create cache server", () -> {
       Region region = getRootRegion().getSubregion(name);
       for (int i = 0; i < numberOfEntries; i++) {
         region.put("key-" + i, new TestObject(i, "ibm"));
@@ -1387,7 +1387,7 @@ public class QueryUsingPoolDUnitTest extends JUnit4CacheTestCase {
     final int numberOfEntries = 100;
 
     // Start server
-    vm0.invoke("Create Bridge Server", () -> {
+    vm0.invoke("Create cache server", () -> {
       createAndStartBridgeServer();
       AttributesFactory factory = new AttributesFactory();
       factory.setScope(Scope.LOCAL);
@@ -1459,7 +1459,7 @@ public class QueryUsingPoolDUnitTest extends JUnit4CacheTestCase {
     final int numberOfEntries = 100;
 
     // Start server
-    vm0.invoke("Create Bridge Server", () -> {
+    vm0.invoke("Create cache server", () -> {
       createAndStartBridgeServer();
       AttributesFactory factory = new AttributesFactory();
       factory.setScope(Scope.LOCAL);
@@ -1593,7 +1593,7 @@ public class QueryUsingPoolDUnitTest extends JUnit4CacheTestCase {
     final int numberOfEntries = 100;
 
     // Start server
-    vm0.invoke("Create Bridge Server", () -> {
+    vm0.invoke("Create cache server", () -> {
       createAndStartBridgeServer();
       AttributesFactory factory = new AttributesFactory();
       factory.setScope(Scope.LOCAL);
@@ -1805,7 +1805,7 @@ public class QueryUsingPoolDUnitTest extends JUnit4CacheTestCase {
   }
 
   /**
-   * Starts a bridge server on the given port, using the given deserializeValues and
+   * Starts a cache server on the given port, using the given deserializeValues and
    * notifyBySubscription to serve up the given region.
    */
   protected int startBridgeServer(int port, boolean notifyBySubscription) throws IOException {
@@ -1819,7 +1819,7 @@ public class QueryUsingPoolDUnitTest extends JUnit4CacheTestCase {
   }
 
   /**
-   * Stops the bridge server that serves up the given cache.
+   * Stops the cache server that serves up the given cache.
    */
   protected void stopBridgeServer(Cache cache) {
     CacheServer bridge = (CacheServer) cache.getCacheServers().iterator().next();
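
The QueryUsingPoolDUnitTest cases above run OQL through a client connection pool. A minimal client-side sketch of that path, using the default pool created by ClientCacheFactory rather than the named pools the test builds with PoolManager; host, port, region path and query are illustrative.

    import org.apache.geode.cache.client.ClientCache;
    import org.apache.geode.cache.client.ClientCacheFactory;
    import org.apache.geode.cache.query.Query;
    import org.apache.geode.cache.query.SelectResults;

    public class QueryUsingPoolSketch {
      public static void main(String[] args) throws Exception {
        // The default pool points at the server given here and carries the queries.
        ClientCache clientCache = new ClientCacheFactory()
            .addPoolServer("localhost", 40404) // illustrative server endpoint
            .create();

        // getQueryService() on a client cache executes queries on the servers via the pool.
        Query query = clientCache.getQueryService()
            .newQuery("SELECT DISTINCT * FROM /root/portfolios p WHERE p.ticker = 'ibm'");
        SelectResults<?> results = (SelectResults<?>) query.execute();
        System.out.println("matching entries: " + results.size());

        clientCache.close();
      }
    }
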
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/cache/query/dunit/RemoteQueryDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/cache/query/dunit/RemoteQueryDUnitTest.java
index b69db16..6df92ee 100644
--- a/geode-core/src/distributedTest/java/org/apache/geode/cache/query/dunit/RemoteQueryDUnitTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/cache/query/dunit/RemoteQueryDUnitTest.java
@@ -67,7 +67,7 @@ import org.apache.geode.test.junit.categories.OQLQueryTest;
 @Category({OQLQueryTest.class})
 public class RemoteQueryDUnitTest extends JUnit4CacheTestCase {
 
-  /** The port on which the bridge server was started in this VM */
+  /** The port on which the cache server was started in this VM */
   private static int bridgeServerPort;
 
   @Override
@@ -93,7 +93,7 @@ public class RemoteQueryDUnitTest extends JUnit4CacheTestCase {
     final int numberOfEntries = 100;
 
     // Start server
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         Properties config = new Properties();
         config.setProperty(LOCATORS,
@@ -112,7 +112,7 @@ public class RemoteQueryDUnitTest extends JUnit4CacheTestCase {
     });
 
     // Initialize server region
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         Region region = getRootRegion().getSubregion(name);
         for (int i = 0; i < numberOfEntries; i++) {
@@ -235,7 +235,7 @@ public class RemoteQueryDUnitTest extends JUnit4CacheTestCase {
     final int numberOfEntries = 100;
 
     // Start server
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         Properties config = new Properties();
         config.setProperty(LOCATORS,
@@ -254,7 +254,7 @@ public class RemoteQueryDUnitTest extends JUnit4CacheTestCase {
     });
 
     // Initialize server region
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         Region region = getRootRegion().getSubregion(name);
         for (int i = 0; i < numberOfEntries; i++) {
@@ -377,7 +377,7 @@ public class RemoteQueryDUnitTest extends JUnit4CacheTestCase {
     final int numberOfEntries = 100;
 
     // Start server
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         Properties config = new Properties();
         config.setProperty(LOCATORS,
@@ -396,7 +396,7 @@ public class RemoteQueryDUnitTest extends JUnit4CacheTestCase {
     });
 
     // Initialize server region
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         Region region = getRootRegion().getSubregion(name);
         for (int i = 0; i < numberOfEntries; i++) {
@@ -520,7 +520,7 @@ public class RemoteQueryDUnitTest extends JUnit4CacheTestCase {
      * final String name = this.getName(); final Host host = Host.getHost(0); VM vm0 =
      * host.getVM(0); VM vm1 = host.getVM(1); // final int numberOfEntries = 100;
      *
-     * // Start server vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") { public
+     * // Start server vm0.invoke(new CacheSerializableRunnable("Create cache server") { public
      * void run2() throws CacheException { Properties config = new Properties();
      * config.setProperty(LOCATORS, "localhost[" + DistributedTestUtils.getDUnitLocatorPort() +
      * "]"); getSystem(config); AttributesFactory factory = new AttributesFactory();
@@ -528,7 +528,7 @@ public class RemoteQueryDUnitTest extends JUnit4CacheTestCase {
      * startBridgeServer(0, false); } catch (Exception ex) {
      * Assert.fail("While starting CacheServer", ex); } } });
      *
-     * // Initialize server region vm0.invoke(new CacheSerializableRunnable("Create Bridge Server")
+     * // Initialize server region vm0.invoke(new CacheSerializableRunnable("Create cache server")
      * { public void run2() throws CacheException { Region region =
      * getRootRegion().getSubregion(name); Portfolio portfolio = null; Position position1 = null;
      * Position position2 = null; Properties portfolioProperties= null; Properties
@@ -639,7 +639,7 @@ public class RemoteQueryDUnitTest extends JUnit4CacheTestCase {
     final int numberOfEntries = 100;
 
     // Start server
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         Properties config = new Properties();
         config.setProperty(LOCATORS,
@@ -658,7 +658,7 @@ public class RemoteQueryDUnitTest extends JUnit4CacheTestCase {
     });
 
     // Initialize server region
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         Region region = getRootRegion().getSubregion(name);
         for (int i = 0; i < numberOfEntries; i++) {
@@ -817,7 +817,7 @@ public class RemoteQueryDUnitTest extends JUnit4CacheTestCase {
     final int numberOfEntries = 100;
 
     // Start server
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         Properties config = new Properties();
         config.setProperty(LOCATORS,
@@ -837,7 +837,7 @@ public class RemoteQueryDUnitTest extends JUnit4CacheTestCase {
     });
 
     // Initialize server region
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         Region region1 = getRootRegion().getSubregion(name + "1");
         for (int i = 0; i < numberOfEntries; i++) {
@@ -921,7 +921,7 @@ public class RemoteQueryDUnitTest extends JUnit4CacheTestCase {
     final int numberOfEntries = 100;
 
     // Start server
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         Properties config = new Properties();
         config.setProperty(LOCATORS,
@@ -939,7 +939,7 @@ public class RemoteQueryDUnitTest extends JUnit4CacheTestCase {
     });
 
     // Initialize server region
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         Region region = getRootRegion().getSubregion(name);
         for (int i = 0; i < numberOfEntries; i++) {
@@ -1088,7 +1088,7 @@ public class RemoteQueryDUnitTest extends JUnit4CacheTestCase {
     final int numberOfEntries = 100;
 
     // Start server
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         Properties config = new Properties();
         config.setProperty(LOCATORS,
@@ -1106,7 +1106,7 @@ public class RemoteQueryDUnitTest extends JUnit4CacheTestCase {
     });
 
     // Initialize server region
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         Region region = getRootRegion().getSubregion(name);
         for (int i = 0; i < numberOfEntries; i++) {
@@ -1188,7 +1188,7 @@ public class RemoteQueryDUnitTest extends JUnit4CacheTestCase {
     final int numberOfEntries = 100;
 
     // Start server
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         Properties config = new Properties();
         config.setProperty(LOCATORS,
@@ -1213,7 +1213,7 @@ public class RemoteQueryDUnitTest extends JUnit4CacheTestCase {
     });
 
     // Initialize server region
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         Region region = getRootRegion().getSubregion(name);
         for (int i = 0; i < numberOfEntries; i++) {
@@ -1325,7 +1325,7 @@ public class RemoteQueryDUnitTest extends JUnit4CacheTestCase {
 
 
   /**
-   * Starts a bridge server on the given port, using the given deserializeValues and
+   * Starts a cache server on the given port, using the given deserializeValues and
    * notifyBySubscription to serve up the given region.
    */
   protected void startBridgeServer(int port, boolean notifyBySubscription) throws IOException {
@@ -1339,7 +1339,7 @@ public class RemoteQueryDUnitTest extends JUnit4CacheTestCase {
   }
 
   /**
-   * Stops the bridge server that serves up the given cache.
+   * Stops the cache server that serves up the given cache.
    */
   protected void stopBridgeServer(Cache cache) {
     CacheServer bridge = (CacheServer) cache.getCacheServers().iterator().next();
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/cache/query/internal/index/PutAllWithIndexPerfDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/cache/query/internal/index/PutAllWithIndexPerfDUnitTest.java
index 6d34235..d8ababe 100644
--- a/geode-core/src/distributedTest/java/org/apache/geode/cache/query/internal/index/PutAllWithIndexPerfDUnitTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/cache/query/internal/index/PutAllWithIndexPerfDUnitTest.java
@@ -50,7 +50,7 @@ import org.apache.geode.test.junit.categories.OQLIndexTest;
 @Category({OQLIndexTest.class})
 public class PutAllWithIndexPerfDUnitTest extends JUnit4CacheTestCase {
 
-  /** The port on which the bridge server was started in this VM */
+  /** The port on which the cache server was started in this VM */
   private static int bridgeServerPort;
   static long timeWithoutStructTypeIndex = 0;
   static long timeWithStructTypeIndex = 0;
@@ -75,7 +75,7 @@ public class PutAllWithIndexPerfDUnitTest extends JUnit4CacheTestCase {
     final int numberOfEntries = 10000;
 
     // Start server
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         Properties config = new Properties();
         config.put(LOCATORS, "localhost[" + DistributedTestUtils.getDUnitLocatorPort() + "]");
@@ -194,7 +194,7 @@ public class PutAllWithIndexPerfDUnitTest extends JUnit4CacheTestCase {
   }
 
   /**
-   * Starts a bridge server on the given port, using the given deserializeValues and
+   * Starts a cache server on the given port, using the given deserializeValues and
    * notifyBySubscription to serve up the given region.
    */
   protected void startBridgeServer(int port, boolean notifyBySubscription) throws IOException {
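
PutAllWithIndexPerfDUnitTest times putAll against a region that carries an OQL index, since index maintenance is part of every write. A minimal single-member sketch of that setup, assuming a loner cache; the Quote class, region name and index are illustrative.

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.geode.cache.Cache;
    import org.apache.geode.cache.CacheFactory;
    import org.apache.geode.cache.Region;
    import org.apache.geode.cache.RegionShortcut;

    public class PutAllWithIndexSketch {
      // Simple value type; the public field "ticker" is what the index covers.
      public static class Quote {
        public final String ticker;
        public final int price;
        public Quote(String ticker, int price) { this.ticker = ticker; this.price = price; }
      }

      public static void main(String[] args) throws Exception {
        Cache cache = new CacheFactory().set("mcast-port", "0").set("locators", "").create();
        Region<String, Quote> region =
            cache.<String, Quote>createRegionFactory(RegionShortcut.REPLICATE).create("quotes");

        // Index maintenance happens for each entry in the putAll, so it is part of the measured cost.
        cache.getQueryService().createIndex("tickerIndex", "q.ticker", "/quotes q");

        Map<String, Quote> batch = new HashMap<>();
        for (int i = 0; i < 1000; i++) {
          batch.put("key-" + i, new Quote("ibm", i));
        }
        long start = System.nanoTime();
        region.putAll(batch);
        System.out.println("putAll of " + batch.size() + " entries took "
            + (System.nanoTime() - start) / 1_000_000 + " ms with the index in place");
        cache.close();
      }
    }
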
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/cache30/CacheXml66DUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/cache30/CacheXml66DUnitTest.java
index 4621767..74c5aa3 100644
--- a/geode-core/src/distributedTest/java/org/apache/geode/cache30/CacheXml66DUnitTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/cache30/CacheXml66DUnitTest.java
@@ -3782,7 +3782,7 @@ public abstract class CacheXml66DUnitTest extends CacheXmlTestCase {
   }
 
   /**
-   * Tests declarative bridge servers
+   * Tests declarative cache servers
    *
    * @since GemFire 4.0
    */
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/cache30/ClientMembershipDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/cache30/ClientMembershipDUnitTest.java
index aea285d..0a9873c 100644
--- a/geode-core/src/distributedTest/java/org/apache/geode/cache30/ClientMembershipDUnitTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/cache30/ClientMembershipDUnitTest.java
@@ -732,7 +732,7 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     final int[] ports = new int[1];
 
     // create BridgeServer in vm0...
-    vm0.invoke("create Bridge Server", () -> {
+    vm0.invoke("create cache server", () -> {
       try {
         System.out.println("[testClientMembershipEventsInClient] Create BridgeServer");
         getSystem();
@@ -864,7 +864,7 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     assertFalse(isClient[CRASHED]);
     resetArraysForTesting(fired, member, memberId, isClient);
 
-    // now test that we redisover the bridge server
+    // now test that we rediscover the cache server
     vm0.invoke("Recreate BridgeServer", () -> {
       try {
         System.out.println("[testClientMembershipEventsInClient] restarting BridgeServer");
@@ -894,7 +894,7 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
   }
 
   /**
-   * Tests notification of events in server process. Bridge servers detect client joins when the
+   * Tests notification of events in server process. Cache servers detect client joins when the
    * client connects to the server.
    */
   @Test
@@ -1317,7 +1317,7 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     for (int i = 0; i < host.getVMCount(); i++) {
       final int whichVM = i;
       final VM vm = Host.getHost(0).getVM(i);
-      vm.invoke("Create bridge server", () -> {
+      vm.invoke("Create cache server", () -> {
         // create BridgeServer in controller vm...
         System.out.println("[testGetConnectedServers] Create BridgeServer");
         getSystem();
@@ -1423,7 +1423,7 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     for (int i = 0; i < host.getVMCount(); i++) {
       final int whichVM = i;
       final VM vm = Host.getHost(0).getVM(i);
-      vm.invoke(new CacheSerializableRunnable("Create bridge server") {
+      vm.invoke(new CacheSerializableRunnable("Create cache server") {
         public void run2() throws CacheException {
           // create BridgeServer in controller vm...
           System.out.println("[testGetNotifiedClients] Create BridgeServer");
@@ -1483,7 +1483,7 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     for (int i = 0; i < host.getVMCount(); i++) {
       final int whichVM = i;
       final VM vm = Host.getHost(0).getVM(i);
-      vm.invoke("Create bridge server", () -> {
+      vm.invoke("Create cache server", () -> {
         Map clients = InternalClientMembership.getConnectedClients(true, getCache());
         assertNotNull(clients);
         testGetNotifiedClients_clientCount = clients.size();
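
The ClientMembershipDUnitTest hunks above assert on client joined/left/crashed notifications delivered to servers. A minimal sketch of registering such a listener through the public management membership API; the logging bodies are illustrative.

    import org.apache.geode.management.membership.ClientMembership;
    import org.apache.geode.management.membership.ClientMembershipEvent;
    import org.apache.geode.management.membership.ClientMembershipListenerAdapter;

    public class ClientMembershipSketch {
      public static void registerLoggingListener() {
        ClientMembership.registerClientMembershipListener(new ClientMembershipListenerAdapter() {
          @Override
          public void memberJoined(ClientMembershipEvent event) {
            System.out.println("client joined: " + event.getMemberId());
          }

          @Override
          public void memberLeft(ClientMembershipEvent event) {
            System.out.println("client left: " + event.getMemberId());
          }

          @Override
          public void memberCrashed(ClientMembershipEvent event) {
            System.out.println("client crashed: " + event.getMemberId());
          }
        });
      }
    }
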
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/cache30/ClientRegisterInterestDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/cache30/ClientRegisterInterestDUnitTest.java
index f7a41f4..edb58dc 100644
--- a/geode-core/src/distributedTest/java/org/apache/geode/cache30/ClientRegisterInterestDUnitTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/cache30/ClientRegisterInterestDUnitTest.java
@@ -67,12 +67,12 @@ public class ClientRegisterInterestDUnitTest extends ClientServerTestCase {
 
   @Override
   public final void postTearDownCacheTestCase() throws Exception {
-    disconnectAllFromDS(); // cleans up bridge server and client and lonerDS
+    disconnectAllFromDS(); // cleans up cache server and client and lonerDS
   }
 
   /**
    * Tests for Bug 35381 Calling register interest if establishCallbackConnection is not set causes
-   * bridge server NPE.
+   * cache server NPE.
    */
   @Test
   public void testBug35381() throws Exception {
@@ -82,7 +82,7 @@ public class ClientRegisterInterestDUnitTest extends ClientServerTestCase {
 
     final int whichVM = 0;
     final VM vm = Host.getHost(0).getVM(whichVM);
-    vm.invoke(new CacheSerializableRunnable("Create bridge server") {
+    vm.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         LogWriterUtils.getLogWriter().info("[testBug35381] Create BridgeServer");
         getSystem();
@@ -165,13 +165,13 @@ public class ClientRegisterInterestDUnitTest extends ClientServerTestCase {
     final String key3 = "KEY-" + regionName1 + "-3";
     final int[] ports = new int[3]; // 3 servers in this test
 
-    // create first bridge server with region for client...
+    // create first cache server with region for client...
     final int firstServerIdx = 0;
     final VM firstServerVM = getHost(0).getVM(firstServerIdx);
-    firstServerVM.invoke(new CacheSerializableRunnable("Create first bridge server") {
+    firstServerVM.invoke(new CacheSerializableRunnable("Create first cache server") {
       public void run2() throws CacheException {
         getLogWriter()
-            .info("[testRegisterInterestFailover] Create first bridge server");
+            .info("[testRegisterInterestFailover] Create first cache server");
         getSystem();
         AttributesFactory factory = new AttributesFactory();
         factory.setScope(LOCAL);
@@ -198,13 +198,13 @@ public class ClientRegisterInterestDUnitTest extends ClientServerTestCase {
       }
     });
 
-    // create second bridge server missing region for client...
+    // create second cache server missing region for client...
     final int secondServerIdx = 1;
     final VM secondServerVM = getHost(0).getVM(secondServerIdx);
-    secondServerVM.invoke(new CacheSerializableRunnable("Create second bridge server") {
+    secondServerVM.invoke(new CacheSerializableRunnable("Create second cache server") {
       public void run2() throws CacheException {
         getLogWriter()
-            .info("[testRegisterInterestFailover] Create second bridge server");
+            .info("[testRegisterInterestFailover] Create second cache server");
         getSystem();
         AttributesFactory factory = new AttributesFactory();
         factory.setScope(LOCAL);
@@ -229,7 +229,7 @@ public class ClientRegisterInterestDUnitTest extends ClientServerTestCase {
       }
     });
 
-    // get the bridge server ports...
+    // get the cache server ports...
     ports[firstServerIdx] =
         firstServerVM.invoke(() -> getBridgeServerPort());
     assertTrue(ports[firstServerIdx] != 0);
@@ -239,7 +239,7 @@ public class ClientRegisterInterestDUnitTest extends ClientServerTestCase {
     assertTrue(ports[firstServerIdx] != ports[secondServerIdx]);
 
     // stop second and third servers
-    secondServerVM.invoke(new CacheSerializableRunnable("Stop second bridge server") {
+    secondServerVM.invoke(new CacheSerializableRunnable("Stop second cache server") {
       public void run2() throws CacheException {
         stopBridgeServers(getCache());
       }
@@ -300,7 +300,7 @@ public class ClientRegisterInterestDUnitTest extends ClientServerTestCase {
     assertEquals("VAL-1", region3.get(key3));
 
     // do puts on server1 and make sure values come thru for all 3 registrations
-    firstServerVM.invoke(new CacheSerializableRunnable("Puts from first bridge server") {
+    firstServerVM.invoke(new CacheSerializableRunnable("Puts from first cache server") {
       public void run2() throws CacheException {
         Region region1 = getCache().getRegion(regionName1);
         region1.put(key1, "VAL-1-1");
@@ -329,7 +329,7 @@ public class ClientRegisterInterestDUnitTest extends ClientServerTestCase {
     assertEquals("VAL-1-1", region3.get(key3));
 
     // force failover to server 2
-    secondServerVM.invoke(new CacheSerializableRunnable("Start second bridge server") {
+    secondServerVM.invoke(new CacheSerializableRunnable("Start second cache server") {
       public void run2() throws CacheException {
         try {
           startBridgeServer(ports[secondServerIdx]);
@@ -340,7 +340,7 @@ public class ClientRegisterInterestDUnitTest extends ClientServerTestCase {
       }
     });
 
-    firstServerVM.invoke(new CacheSerializableRunnable("Stop first bridge server") {
+    firstServerVM.invoke(new CacheSerializableRunnable("Stop first cache server") {
       public void run2() throws CacheException {
         stopBridgeServers(getCache());
       }
@@ -366,7 +366,7 @@ public class ClientRegisterInterestDUnitTest extends ClientServerTestCase {
 
     // region2 registration should be gone now
     // do puts on server2 and make sure values come thru for only 2 registrations
-    secondServerVM.invoke(new CacheSerializableRunnable("Puts from second bridge server") {
+    secondServerVM.invoke(new CacheSerializableRunnable("Puts from second cache server") {
       public void run2() throws CacheException {
         AttributesFactory factory = new AttributesFactory();
         factory.setScope(LOCAL);
@@ -380,7 +380,7 @@ public class ClientRegisterInterestDUnitTest extends ClientServerTestCase {
 
     region2.put(key2, "VAL-0");
 
-    secondServerVM.invoke(new CacheSerializableRunnable("Put from second bridge server") {
+    secondServerVM.invoke(new CacheSerializableRunnable("Put from second cache server") {
       public void run2() throws CacheException {
         Region region1 = getCache().getRegion(regionName1);
         region1.put(key1, "VAL-2-2");
@@ -415,7 +415,7 @@ public class ClientRegisterInterestDUnitTest extends ClientServerTestCase {
     region2.registerInterest(key2);
     assertEquals("VAL-2-1", region2.get(key2));
 
-    secondServerVM.invoke(new CacheSerializableRunnable("Put from second bridge server") {
+    secondServerVM.invoke(new CacheSerializableRunnable("Put from second cache server") {
       public void run2() throws CacheException {
         Region region1 = getCache().getRegion(regionName1);
         region1.put(key1, "VAL-2-3");
@@ -455,11 +455,11 @@ public class ClientRegisterInterestDUnitTest extends ClientServerTestCase {
     final String name = this.getUniqueName();
     final String regionName1 = name + "-1";
 
-    // create first bridge server with region for client...
+    // create first cache server with region for client...
     final int firstServerIdx = 1;
 
     final VM firstServerVM = Host.getHost(0).getVM(firstServerIdx);
-    firstServerVM.invoke(new CacheSerializableRunnable("Create first bridge server") {
+    firstServerVM.invoke(new CacheSerializableRunnable("Create first cache server") {
       public void run2() throws CacheException {
         Cache cache = new CacheFactory().set("mcast-port", "0").create();
 
@@ -479,7 +479,7 @@ public class ClientRegisterInterestDUnitTest extends ClientServerTestCase {
       }
     });
 
-    // get the bridge server ports...
+    // get the cache server ports...
     int port = firstServerVM.invoke(() -> ClientRegisterInterestDUnitTest.getBridgeServerPort());
 
     try {
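
ClientRegisterInterestDUnitTest, including the bug 35381 case reworded above, revolves around registerInterest, which requires the pool's subscription (callback) connection. A minimal client-side sketch, assuming a reachable server; host, port, region name and key are illustrative.

    import org.apache.geode.cache.Region;
    import org.apache.geode.cache.client.ClientCache;
    import org.apache.geode.cache.client.ClientCacheFactory;
    import org.apache.geode.cache.client.ClientRegionShortcut;

    public class RegisterInterestSketch {
      public static void main(String[] args) {
        ClientCache clientCache = new ClientCacheFactory()
            .addPoolServer("localhost", 40404)   // illustrative server endpoint
            .setPoolSubscriptionEnabled(true)    // without this, registerInterest cannot be serviced
            .create();

        Region<String, String> region = clientCache
            .<String, String>createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY)
            .create("exampleRegion");

        // Server-side updates to this key are pushed to the client over the
        // subscription connection and applied to the local copy.
        region.registerInterest("KEY-1");

        clientCache.close();
      }
    }
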
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/GridAdvisorDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/GridAdvisorDUnitTest.java
index 99007e6..575ef63 100644
--- a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/GridAdvisorDUnitTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/GridAdvisorDUnitTest.java
@@ -70,7 +70,7 @@ public class GridAdvisorDUnitTest extends JUnit4DistributedTestCase {
   }
 
   /**
-   * Tests 2 controllers and 2 bridge servers
+   * Tests 2 controllers and 2 cache servers
    */
   @Test
   public void test2by2() throws Exception {
@@ -283,7 +283,7 @@ public class GridAdvisorDUnitTest extends JUnit4DistributedTestCase {
       }
     });
     vm1.invoke(
-        new SerializableRunnable("Verify bridge server view on " + bsPort1 + " and on " + bsPort3) {
+        new SerializableRunnable("Verify cache server view on " + bsPort1 + " and on " + bsPort3) {
           public void run() {
             Cache c = cache;
             List bslist = c.getCacheServers();
@@ -313,7 +313,7 @@ public class GridAdvisorDUnitTest extends JUnit4DistributedTestCase {
           }
         });
     vm2.invoke(
-        new SerializableRunnable("Verify bridge server view on " + bsPort2 + " and on " + bsPort4) {
+        new SerializableRunnable("Verify cache server view on " + bsPort2 + " and on " + bsPort4) {
           public void run() {
             Cache c = cache;
             List bslist = c.getCacheServers();
@@ -343,7 +343,7 @@ public class GridAdvisorDUnitTest extends JUnit4DistributedTestCase {
           }
         });
 
-    SerializableRunnable stopBS = new SerializableRunnable("stop bridge server") {
+    SerializableRunnable stopBS = new SerializableRunnable("stop cache server") {
       public void run() {
         Cache c = cache;
         List bslist = c.getCacheServers();
@@ -452,7 +452,7 @@ public class GridAdvisorDUnitTest extends JUnit4DistributedTestCase {
         assertEquals(0, others.size());
       }
     });
-    vm2.invoke(new SerializableRunnable("Verify bridge server saw locator stop") {
+    vm2.invoke(new SerializableRunnable("Verify cache server saw locator stop") {
       public void run() {
         Cache c = cache;
         List bslist = c.getCacheServers();
@@ -475,7 +475,7 @@ public class GridAdvisorDUnitTest extends JUnit4DistributedTestCase {
         }
       }
     });
-    vm1.invoke(new SerializableRunnable("Verify bridge server saw locator stop") {
+    vm1.invoke(new SerializableRunnable("Verify cache server saw locator stop") {
       public void run() {
         Cache c = cache;
         List bslist = c.getCacheServers();
@@ -503,7 +503,7 @@ public class GridAdvisorDUnitTest extends JUnit4DistributedTestCase {
       }
     });
 
-    SerializableRunnable restartBS = new SerializableRunnable("restart bridge server") {
+    SerializableRunnable restartBS = new SerializableRunnable("restart cache server") {
       public void run() {
         try {
           Cache c = cache;
@@ -519,10 +519,10 @@ public class GridAdvisorDUnitTest extends JUnit4DistributedTestCase {
         }
       }
     };
-    // restart bridge server 1 and see if controller sees it
+    // restart cache server 1 and see if controller sees it
     vm1.invoke(restartBS);
 
-    vm3.invoke(new SerializableRunnable("Verify bridge server restart ") {
+    vm3.invoke(new SerializableRunnable("Verify cache server restart ") {
       public void run() {
         assertTrue(Locator.hasLocator());
         InternalLocator l = (InternalLocator) Locator.getLocator();
@@ -557,7 +557,7 @@ public class GridAdvisorDUnitTest extends JUnit4DistributedTestCase {
 
     vm1.invoke(disconnect);
     vm2.invoke(disconnect);
-    // now make sure controller saw all bridge servers stop
+    // now make sure controller saw all cache servers stop
 
     vm3.invoke(new SerializableRunnable("Verify locator stopped ") {
       public void run() {
@@ -783,7 +783,7 @@ public class GridAdvisorDUnitTest extends JUnit4DistributedTestCase {
       }
     });
     vm1.invoke(
-        new SerializableRunnable("Verify bridge server view on " + bsPort1 + " and on " + bsPort3) {
+        new SerializableRunnable("Verify cache server view on " + bsPort1 + " and on " + bsPort3) {
           public void run() {
             Cache c = cache;
             List bslist = c.getCacheServers();
@@ -813,7 +813,7 @@ public class GridAdvisorDUnitTest extends JUnit4DistributedTestCase {
           }
         });
     vm2.invoke(
-        new SerializableRunnable("Verify bridge server view on " + bsPort2 + " and on " + bsPort4) {
+        new SerializableRunnable("Verify cache server view on " + bsPort2 + " and on " + bsPort4) {
           public void run() {
             Cache c = cache;
             List bslist = c.getCacheServers();
@@ -843,7 +843,7 @@ public class GridAdvisorDUnitTest extends JUnit4DistributedTestCase {
           }
         });
 
-    SerializableRunnable stopBS = new SerializableRunnable("stop bridge server") {
+    SerializableRunnable stopBS = new SerializableRunnable("stop cache server") {
       public void run() {
         Cache c = cache;
         List bslist = c.getCacheServers();
@@ -952,7 +952,7 @@ public class GridAdvisorDUnitTest extends JUnit4DistributedTestCase {
         assertEquals(0, others.size());
       }
     });
-    vm2.invoke(new SerializableRunnable("Verify bridge server saw locator stop") {
+    vm2.invoke(new SerializableRunnable("Verify cache server saw locator stop") {
       public void run() {
         Cache c = cache;
         List bslist = c.getCacheServers();
@@ -975,7 +975,7 @@ public class GridAdvisorDUnitTest extends JUnit4DistributedTestCase {
         }
       }
     });
-    vm1.invoke(new SerializableRunnable("Verify bridge server saw locator stop") {
+    vm1.invoke(new SerializableRunnable("Verify cache server saw locator stop") {
       public void run() {
         Cache c = cache;
         List bslist = c.getCacheServers();
@@ -1003,7 +1003,7 @@ public class GridAdvisorDUnitTest extends JUnit4DistributedTestCase {
       }
     });
 
-    SerializableRunnable restartBS = new SerializableRunnable("restart bridge server") {
+    SerializableRunnable restartBS = new SerializableRunnable("restart cache server") {
       public void run() {
         try {
           Cache c = cache;
@@ -1019,10 +1019,10 @@ public class GridAdvisorDUnitTest extends JUnit4DistributedTestCase {
         }
       }
     };
-    // restart bridge server 1 and see if controller sees it
+    // restart cache server 1 and see if controller sees it
     vm1.invoke(restartBS);
 
-    vm3.invoke(new SerializableRunnable("Verify bridge server restart ") {
+    vm3.invoke(new SerializableRunnable("Verify cache server restart ") {
       public void run() {
         assertTrue(Locator.hasLocator());
         InternalLocator l = (InternalLocator) Locator.getLocator();
@@ -1057,7 +1057,7 @@ public class GridAdvisorDUnitTest extends JUnit4DistributedTestCase {
 
     vm1.invoke(disconnect);
     vm2.invoke(disconnect);
-    // now make sure controller saw all bridge servers stop
+    // now make sure controller saw all cache servers stop
 
     vm3.invoke(new SerializableRunnable("Verify locator stopped ") {
       public void run() {
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/execute/DistributedRegionFunctionExecutionDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/execute/DistributedRegionFunctionExecutionDUnitTest.java
index 0301ba8..4adf97f 100755
--- a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/execute/DistributedRegionFunctionExecutionDUnitTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/execute/DistributedRegionFunctionExecutionDUnitTest.java
@@ -1324,10 +1324,10 @@ public class DistributedRegionFunctionExecutionDUnitTest extends JUnit4Distribut
     Wait.pause(2000);
     Collection bridgeServers = cache.getCacheServers();
     LogWriterUtils.getLogWriter()
-        .info("Start Server Bridge Servers list : " + bridgeServers.size());
+        .info("Start Server cache servers list : " + bridgeServers.size());
     Iterator bridgeIterator = bridgeServers.iterator();
     CacheServer bridgeServer = (CacheServer) bridgeIterator.next();
-    LogWriterUtils.getLogWriter().info("start Server Bridge Server" + bridgeServer);
+    LogWriterUtils.getLogWriter().info("start Server cache server " + bridgeServer);
     try {
       bridgeServer.start();
     } catch (IOException e) {
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/execute/PRClientServerTestBase.java b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/execute/PRClientServerTestBase.java
index 73833ef..bbc1d9b 100755
--- a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/execute/PRClientServerTestBase.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/execute/PRClientServerTestBase.java
@@ -630,10 +630,10 @@ public class PRClientServerTestBase extends JUnit4CacheTestCase {
     Wait.pause(2000);
     Collection bridgeServers = cache.getCacheServers();
     LogWriterUtils.getLogWriter()
-        .info("Start Server Bridge Servers list : " + bridgeServers.size());
+        .info("Start Server cache servers list : " + bridgeServers.size());
     Iterator bridgeIterator = bridgeServers.iterator();
     CacheServer bridgeServer = (CacheServer) bridgeIterator.next();
-    LogWriterUtils.getLogWriter().info("start Server Bridge Server" + bridgeServer);
+    LogWriterUtils.getLogWriter().info("start Server cache server " + bridgeServer);
     bridgeServer.start();
   }
 
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/ha/HARQueueNewImplDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/ha/HARQueueNewImplDUnitTest.java
index e17a5d7..89fc025 100755
--- a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/ha/HARQueueNewImplDUnitTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/ha/HARQueueNewImplDUnitTest.java
@@ -613,7 +613,7 @@ public class HARQueueNewImplDUnitTest extends JUnit4DistributedTestCase {
   }
 
   /**
-   * This test verifies that two clients, connected to two bridge servers with different
+   * This test verifies that two clients, connected to two cache servers with different
    * notifyBySubscription values, on a single VM, receive updates/invalidates depending upon their
    * notifyBySubscription value.
    */
@@ -680,10 +680,10 @@ public class HARQueueNewImplDUnitTest extends JUnit4DistributedTestCase {
   }
 
   /**
-   * This test verifies that client-messages-regions are not created for the bridge servers who have
-   * eviction policy as 'none'. Instead, such bridge servers will have simple HashMap structures.
+   * This test verifies that client-messages-regions are not created for cache servers whose
+   * eviction policy is 'none'. Instead, such cache servers use simple HashMap structures.
    * Also, it verifies that such a structure (referred to as haContainer, in general) is destroyed
-   * when its bridge server is stopped.
+   * when its cache server is stopped.
    */
   @Test
   public void testCMRNotCreatedForNoneEvictionPolicy() throws Exception {
@@ -728,9 +728,9 @@ public class HARQueueNewImplDUnitTest extends JUnit4DistributedTestCase {
   }
 
   /**
-   * This test verifies that client-messages-regions are created for the bridge servers who have
+   * This test verifies that client-messages-regions are created for cache servers that have
    * eviction policy either as 'mem' or as 'entry'. Also, it verifies that such a
-   * client-messages-region is destroyed when its bridge server is stopped.
+   * client-messages-region is destroyed when its cache server is stopped.
    */
   @Test
   public void testCMRCreatedForMemOrEntryEvictionPolicy() throws Exception {
@@ -767,7 +767,7 @@ public class HARQueueNewImplDUnitTest extends JUnit4DistributedTestCase {
 
   /**
    * This test verifies that the Cache.rootRegions() method does not return the
-   * client-messages-region of any of the cache's attached bridge servers.
+   * client-messages-region of any of the cache's attached cache servers.
    */
   @Test
   public void testCMRNotReturnedByRootRegionsMethod() throws Exception {
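
For readers following the eviction-policy tests above, here is a minimal sketch of the server-side
configuration they exercise, assuming the standard CacheServer and ClientSubscriptionConfig APIs;
the port and capacity values are illustrative.

    import java.io.IOException;
    import org.apache.geode.cache.Cache;
    import org.apache.geode.cache.CacheFactory;
    import org.apache.geode.cache.server.CacheServer;
    import org.apache.geode.cache.server.ClientSubscriptionConfig;

    public class SubscriptionEvictionSketch {
      public static void main(String[] args) throws IOException {
        Cache cache = new CacheFactory().set("mcast-port", "0").create();
        CacheServer server = cache.addCacheServer();
        server.setPort(40404); // illustrative port
        // With "entry" (or "mem") eviction, overflowed client messages are kept in a
        // client-messages-region; with "none" a plain HashMap-backed container is used.
        ClientSubscriptionConfig subscription = server.getClientSubscriptionConfig();
        subscription.setEvictionPolicy("entry");
        subscription.setCapacity(1000); // illustrative capacity
        server.start();
      }
    }
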
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/tier/sockets/CacheServerTransactionsSelectorDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/tier/sockets/CacheServerTransactionsSelectorDUnitTest.java
index dd81cfe..01235e6 100644
--- a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/tier/sockets/CacheServerTransactionsSelectorDUnitTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/tier/sockets/CacheServerTransactionsSelectorDUnitTest.java
@@ -19,7 +19,7 @@ import org.junit.experimental.categories.Category;
 import org.apache.geode.test.junit.categories.ClientServerTest;
 
 /**
- * Just like CacheServerTransactionsDUnitTest but configures bridge server with thread pool (i.e.
+ * Just like CacheServerTransactionsDUnitTest but configures cache server with thread pool (i.e.
  * selector).
  */
 @Category({ClientServerTest.class})
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/tier/sockets/ForceInvalidateEvictionDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/tier/sockets/ForceInvalidateEvictionDUnitTest.java
index f5b8b42..56c2fde 100644
--- a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/tier/sockets/ForceInvalidateEvictionDUnitTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/tier/sockets/ForceInvalidateEvictionDUnitTest.java
@@ -367,7 +367,7 @@ public class ForceInvalidateEvictionDUnitTest extends JUnit4CacheTestCase {
 
   private int addCacheServer(VM vm) {
     final int port = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
-    vm.invoke(new SerializableRunnable("add bridge server") {
+    vm.invoke(new SerializableRunnable("add cache server") {
       public void run() {
         Cache cache = getCache();
         CacheServer server = cache.addCacheServer();
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/tier/sockets/InterestListEndpointSelectorDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/tier/sockets/InterestListEndpointSelectorDUnitTest.java
index 10235bf..6125311 100644
--- a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/tier/sockets/InterestListEndpointSelectorDUnitTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/tier/sockets/InterestListEndpointSelectorDUnitTest.java
@@ -19,7 +19,7 @@ import org.junit.experimental.categories.Category;
 import org.apache.geode.test.junit.categories.ClientSubscriptionTest;
 
 /**
- * Just like InterestListEndpointDUnitTest but uses thread pool (i.e. selector) in bridge servers
+ * Just like InterestListEndpointDUnitTest but uses thread pool (i.e. selector) in cache servers
  *
  */
 @Category({ClientSubscriptionTest.class})
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/tier/sockets/RedundancyLevelTestBase.java b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/tier/sockets/RedundancyLevelTestBase.java
index 13d884c..c567cef 100755
--- a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/tier/sockets/RedundancyLevelTestBase.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/tier/sockets/RedundancyLevelTestBase.java
@@ -274,7 +274,7 @@ public class RedundancyLevelTestBase extends JUnit4DistributedTestCase {
   }
 
   static void verifyInterestRegistration() {
-    await("Number of bridge servers (" + cache.getCacheServers().size() + ") never became 1")
+    await("Number of cache servers (" + cache.getCacheServers().size() + ") never became 1")
         .until(() -> cache.getCacheServers().size(), equalTo(1));
 
     CacheServerImpl bs = (CacheServerImpl) cache.getCacheServers().iterator().next();
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/tier/sockets/ReliableMessagingDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/tier/sockets/ReliableMessagingDUnitTest.java
index c5dc827..536ad40 100755
--- a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/tier/sockets/ReliableMessagingDUnitTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/tier/sockets/ReliableMessagingDUnitTest.java
@@ -260,7 +260,7 @@ public class ReliableMessagingDUnitTest extends JUnit4DistributedTestCase {
   }
 
   /**
-   * Wait for new value on bridge server to become visible in this cache
+   * Wait for new value on cache server to become visible in this cache
    */
   public static void waitForServerUpdate() {
     Region r1 = cache.getRegion(Region.SEPARATOR + REGION_NAME);
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/management/UniversalMembershipListenerAdapterDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/management/UniversalMembershipListenerAdapterDUnitTest.java
index acfe49c..ab8ea70 100644
--- a/geode-core/src/distributedTest/java/org/apache/geode/management/UniversalMembershipListenerAdapterDUnitTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/management/UniversalMembershipListenerAdapterDUnitTest.java
@@ -1058,7 +1058,7 @@ public class UniversalMembershipListenerAdapterDUnitTest extends ClientServerTes
   }
 
   /**
-   * Tests notification of events for bridge server in system bridge client process.
+   * Tests notification of events for cache server in system bridge client process.
    */
   @Test
   public void testServerEventsInPeerSystem() throws Exception {
@@ -1310,7 +1310,7 @@ public class UniversalMembershipListenerAdapterDUnitTest extends ClientServerTes
   }
 
   /**
-   * Tests notification of events for bridge server in system bridge client process.
+   * Tests notification of events for cache server in system bridge client process.
    */
   @Test
   public void testServerEventsInLonerClient() throws Exception {
@@ -1438,7 +1438,7 @@ public class UniversalMembershipListenerAdapterDUnitTest extends ClientServerTes
     // gather details for later creation of pool...
     assertThat((int) vm0.invoke("getServerPort", () -> serverPort)).isEqualTo(ports[0]);
 
-    // create region which connects to bridge server
+    // create region which connects to cache server
     AttributesFactory factory = new AttributesFactory();
     factory.setScope(Scope.LOCAL);
     configureConnectionPool(factory, getServerHostName(host), ports, false, -1, -1, null);
@@ -1495,7 +1495,7 @@ public class UniversalMembershipListenerAdapterDUnitTest extends ClientServerTes
     addIgnoredException(IOException.class.getName());
     addIgnoredException(ConnectException.class.getName());
 
-    vm0.invoke(new SerializableRunnable("Disconnect bridge server") {
+    vm0.invoke(new SerializableRunnable("Disconnect cache server") {
       @Override
       public void run() {
         closeCache();
diff --git a/geode-core/src/integrationTest/java/org/apache/geode/internal/cache/ClientMessagesRegionCreationAndDestroyJUnitTest.java b/geode-core/src/integrationTest/java/org/apache/geode/internal/cache/ClientMessagesRegionCreationAndDestroyJUnitTest.java
index 8b8ab54..b8e533e 100755
--- a/geode-core/src/integrationTest/java/org/apache/geode/internal/cache/ClientMessagesRegionCreationAndDestroyJUnitTest.java
+++ b/geode-core/src/integrationTest/java/org/apache/geode/internal/cache/ClientMessagesRegionCreationAndDestroyJUnitTest.java
@@ -47,7 +47,7 @@ public class ClientMessagesRegionCreationAndDestroyJUnitTest {
 
   /** The cache instance */
   private Cache cache = null;
-  // max number of bridge server can attached to the cache
+  // max number of cache servers that can be attached to the cache
   private int brigeNum = 5;
   // stores corresponding names of client messages region created by bridge
   // server
@@ -64,7 +64,7 @@ public class ClientMessagesRegionCreationAndDestroyJUnitTest {
   }
 
   /**
-   * Create and attach bridge server to cache
+   * Create and attach cache server to cache
    *
    */
 
@@ -110,12 +110,12 @@ public class ClientMessagesRegionCreationAndDestroyJUnitTest {
   }
 
   /**
-   * Attach bridge server
+   * Attach cache server
    */
   private void attachmentOfBridgeServer() {
     if (cache.getCacheServers().size() < brigeNum) {
       try {
-        // attaching and starting bridge server
+        // attaching and starting cache server
         attachBridgeServer();
       } catch (IOException e) {
         e.printStackTrace();
@@ -124,10 +124,10 @@ public class ClientMessagesRegionCreationAndDestroyJUnitTest {
   }
 
   /**
-   * Stop's all bridge servers attached
+   * Stops all attached cache servers
    */
   private void dettachmentOfBridgeServer() {
-    // detach all bridge server to test destroy of client_messages_region
+    // detach all cache servers to test destruction of client_messages_region
     for (Iterator itr = cache.getCacheServers().iterator(); itr.hasNext();) {
       CacheServerImpl server = (CacheServerImpl) itr.next();
       String rName =
diff --git a/geode-core/src/integrationTest/java/org/apache/geode/internal/cache/DiskRegionJUnitTest.java b/geode-core/src/integrationTest/java/org/apache/geode/internal/cache/DiskRegionJUnitTest.java
index e781b4f..8017e19 100755
--- a/geode-core/src/integrationTest/java/org/apache/geode/internal/cache/DiskRegionJUnitTest.java
+++ b/geode-core/src/integrationTest/java/org/apache/geode/internal/cache/DiskRegionJUnitTest.java
@@ -1678,7 +1678,7 @@ public class DiskRegionJUnitTest {
 
   /**
    * If IOException occurs while updating an entry in an already initialized DiskRegion ,then the
-   * bridge servers should not be stopped , if any running as they are no clients connected to it.
+   * cache servers, if any are running, should not be stopped, since no clients are connected to them.
    */
   @Test
   public void testBridgeServerStoppingInSyncPersistOnlyForIOExceptionCase() throws Exception {
@@ -1777,7 +1777,7 @@ public class DiskRegionJUnitTest {
   }
 
   /**
-   * If IOException occurs while initializing a region, then the bridge servers should not be
+   * If IOException occurs while initializing a region, then the cache servers should not be
    * stopped
    */
   @Test
diff --git a/geode-core/src/main/java/org/apache/geode/CopyHelper.java b/geode-core/src/main/java/org/apache/geode/CopyHelper.java
index 635402c..9052b3c 100644
--- a/geode-core/src/main/java/org/apache/geode/CopyHelper.java
+++ b/geode-core/src/main/java/org/apache/geode/CopyHelper.java
@@ -274,11 +274,11 @@ public final class CopyHelper {
       return (T) DataSerializer.readObject(new DataInputStream(hdos.getInputStream()));
     } catch (ClassNotFoundException ex) {
       throw new CopyException(
-          String.format("Copy failed on instance of  %s", o.getClass()),
+          String.format("Copy failed on instance of %s", o.getClass()),
           ex);
     } catch (IOException ex) {
       throw new CopyException(
-          String.format("Copy failed on instance of  %s", o.getClass()),
+          String.format("Copy failed on instance of %s", o.getClass()),
           ex);
     }
 
diff --git a/geode-core/src/main/java/org/apache/geode/DataSerializer.java b/geode-core/src/main/java/org/apache/geode/DataSerializer.java
index 215da08..02dee91 100644
--- a/geode-core/src/main/java/org/apache/geode/DataSerializer.java
+++ b/geode-core/src/main/java/org/apache/geode/DataSerializer.java
@@ -1321,7 +1321,7 @@ public abstract class DataSerializer {
           DataSerializer.writeObject(object, hdos);
         } catch (IOException e) {
           RuntimeException e2 = new IllegalArgumentException(
-              "Probelm while serializing.");
+              "Problem while serializing.");
           e2.initCause(e);
           throw e2;
         }
diff --git a/geode-core/src/main/java/org/apache/geode/Instantiator.java b/geode-core/src/main/java/org/apache/geode/Instantiator.java
index 07763a6..65aafba 100644
--- a/geode-core/src/main/java/org/apache/geode/Instantiator.java
+++ b/geode-core/src/main/java/org/apache/geode/Instantiator.java
@@ -195,13 +195,13 @@ public abstract class Instantiator {
 
     if (!DataSerializable.class.isAssignableFrom(c)) {
       throw new IllegalArgumentException(
-          String.format("Class  %s  does not implement DataSerializable",
+          String.format("Class %s does not implement DataSerializable",
               c.getName()));
     }
 
     if (classId == 0) {
       throw new IllegalArgumentException(
-          String.format("Class id  %s  must not be 0.", classId));
+          String.format("Class id %s must not be 0.", classId));
     }
 
     this.clazz = c;
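
The message cleanups in CopyHelper, Instantiator, SystemFailure, and the admin classes below all
remove the same pattern; a small illustration of why the old templates rendered with doubled
spaces (the class name is made up):

    // Old template: two literal spaces surround %s, so the substituted value ends up
    // with doubled spaces around it in the rendered message.
    String before = String.format("Class  %s  does not implement DataSerializable", "com.example.Foo");
    // -> "Class  com.example.Foo  does not implement DataSerializable"

    // New template: single spaces produce normally spaced output.
    String after = String.format("Class %s does not implement DataSerializable", "com.example.Foo");
    // -> "Class com.example.Foo does not implement DataSerializable"
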
diff --git a/geode-core/src/main/java/org/apache/geode/SystemFailure.java b/geode-core/src/main/java/org/apache/geode/SystemFailure.java
index 918d617..474e117 100644
--- a/geode-core/src/main/java/org/apache/geode/SystemFailure.java
+++ b/geode-core/src/main/java/org/apache/geode/SystemFailure.java
@@ -576,7 +576,7 @@ public final class SystemFailure {
     // Allocate this error in advance, since it's too late once it's been detected!
     final OutOfMemoryError oome = new OutOfMemoryError(
         String.format(
-            "%s : memory has remained chronically below  %s  bytes (out of a maximum of  %s ) for  %s  sec.",
+            "%s : memory has remained chronically below %s bytes (out of a maximum of %s ) for %s sec.",
             new Object[] {PROCTOR_NAME, Long.valueOf(minimumMemoryThreshold),
                 Long.valueOf(maxMemory), Integer.valueOf(WATCHDOG_WAIT)}));
 
diff --git a/geode-core/src/main/java/org/apache/geode/admin/AlertLevel.java b/geode-core/src/main/java/org/apache/geode/admin/AlertLevel.java
index 2ea93a5..6044405 100755
--- a/geode-core/src/main/java/org/apache/geode/admin/AlertLevel.java
+++ b/geode-core/src/main/java/org/apache/geode/admin/AlertLevel.java
@@ -79,7 +79,7 @@ public class AlertLevel implements java.io.Serializable {
       case Alert.OFF:
         return AlertLevel.OFF;
       default:
-        throw new IllegalArgumentException(String.format("Unknown alert severity:  %s",
+        throw new IllegalArgumentException(String.format("Unknown alert severity: %s",
             Integer.valueOf(severity)));
     }
   }
diff --git a/geode-core/src/main/java/org/apache/geode/admin/SystemMemberBridgeServer.java b/geode-core/src/main/java/org/apache/geode/admin/SystemMemberBridgeServer.java
index ae8b770..8e692c4 100644
--- a/geode-core/src/main/java/org/apache/geode/admin/SystemMemberBridgeServer.java
+++ b/geode-core/src/main/java/org/apache/geode/admin/SystemMemberBridgeServer.java
@@ -29,37 +29,37 @@ import org.apache.geode.cache.server.ServerLoadProbe;
 public interface SystemMemberBridgeServer {
 
   /**
-   * Returns the port on which this bridge server listens for bridge clients to connect.
+   * Returns the port on which this cache server listens for bridge clients to connect.
    */
   int getPort();
 
   /**
-   * Sets the port on which this bridge server listens for bridge clients to connect.
+   * Sets the port on which this cache server listens for bridge clients to connect.
    *
-   * @throws AdminException If this bridge server is running
+   * @throws AdminException If this cache server is running
    */
   void setPort(int port) throws AdminException;
 
   /**
-   * Starts this bridge server. Once the server is running, its configuration cannot be changed.
+   * Starts this cache server. Once the server is running, its configuration cannot be changed.
    *
-   * @throws AdminException If an error occurs while starting the bridge server
+   * @throws AdminException If an error occurs while starting the cache server
    */
   void start() throws AdminException;
 
   /**
-   * Returns whether or not this bridge server is running
+   * Returns whether or not this cache server is running
    */
   boolean isRunning();
 
   /**
-   * Stops this bridge server. Note that the <code>BridgeServer</code> can be reconfigured and
+   * Stops this cache server. Note that the <code>BridgeServer</code> can be reconfigured and
    * restarted if desired.
    */
   void stop() throws AdminException;
 
   /**
-   * Updates the information about this bridge server.
+   * Updates the information about this cache server.
    */
   void refresh();
 
@@ -74,7 +74,7 @@ public interface SystemMemberBridgeServer {
   /**
    * Sets the ip address or host name that this server is to listen on for client connections.
    * <p>
-   * Setting a specific bind address will cause the bridge server to always use this address and
+   * Setting a specific bind address will cause the cache server to always use this address and
    * ignore any address specified by "server-bind-address" or "bind-address" in the
    * <code>gemfire.properties</code> file (see
    * {@link org.apache.geode.distributed.DistributedSystem} for a description of these properties).
@@ -86,7 +86,7 @@ public interface SystemMemberBridgeServer {
    * <code>"0.0.0.0"</code>.
    *
    * @param address the ip address or host name that this server is to listen on
-   * @throws AdminException if this bridge server is running
+   * @throws AdminException if this cache server is running
    * @since GemFire 5.7
    */
   void setBindAddress(String address) throws AdminException;
@@ -112,31 +112,31 @@ public interface SystemMemberBridgeServer {
    *
    * @param name the ip address or host name that will be given to clients so they can connect to
    *        this server
-   * @throws AdminException if this bridge server is running
+   * @throws AdminException if this cache server is running
    * @since GemFire 5.7
    */
   void setHostnameForClients(String name) throws AdminException;
 
   /**
-   * Sets whether or not this bridge server should notify clients based on key subscription.
+   * Sets whether or not this cache server should notify clients based on key subscription.
    *
    * If false, then an update to any key on the server causes an update to be sent to all clients.
    * This update does not push the actual data to the clients. Instead, it causes the client to
    * locally invalidate or destroy the corresponding entry. The next time the client requests the
-   * key, it goes to the bridge server for the value.
+   * key, it goes to the cache server for the value.
    *
    * If true, then an update to any key on the server causes an update to be sent to only those
    * clients who have registered interest in that key. Other clients are not notified of the change.
    * In addition, the actual value is pushed to the client. The client does not need to request the
-   * new value from the bridge server.
+   * new value from the cache server.
    *
-   * @throws AdminException if this bridge server is running
+   * @throws AdminException if this cache server is running
    * @since GemFire 5.7
    */
   void setNotifyBySubscription(boolean b) throws AdminException;
 
   /**
-   * Answers whether or not this bridge server should notify clients based on key subscription.
+   * Answers whether or not this cache server should notify clients based on key subscription.
    *
    * @since GemFire 5.7
    */
@@ -147,7 +147,7 @@ public interface SystemMemberBridgeServer {
    * default is 32768 bytes.
    *
    * @param socketBufferSize The size in bytes of the socket buffer
-   * @throws AdminException if this bridge server is running
+   * @throws AdminException if this cache server is running
    * @since GemFire 5.7
    */
   void setSocketBufferSize(int socketBufferSize) throws AdminException;
@@ -167,7 +167,7 @@ public interface SystemMemberBridgeServer {
    * clients. The default is 60000 ms.
    *
    * @param maximumTimeBetweenPings The maximum amount of time between client pings
-   * @throws AdminException if this bridge server is running
+   * @throws AdminException if this cache server is running
    * @since GemFire 5.7
    */
   void setMaximumTimeBetweenPings(int maximumTimeBetweenPings) throws AdminException;
@@ -193,7 +193,7 @@ public interface SystemMemberBridgeServer {
    * Sets the maxium number of client connections allowed. When the maximum is reached the server
    * will stop accepting connections.
    *
-   * @throws AdminException if this bridge server is running
+   * @throws AdminException if this cache server is running
    * @since GemFire 5.7
    */
   void setMaxConnections(int maxCons) throws AdminException;
@@ -210,7 +210,7 @@ public interface SystemMemberBridgeServer {
    * Sets the maxium number of threads allowed in this server to service client requests. The
    * default of <code>0</code> causes the server to dedicate a thread for every client connection.
    *
-   * @throws AdminException if this bridge server is running
+   * @throws AdminException if this cache server is running
    * @since GemFire 5.7
    */
   void setMaxThreads(int maxThreads) throws AdminException;
@@ -225,7 +225,7 @@ public interface SystemMemberBridgeServer {
   /**
    * Sets maximum number of messages that can be enqueued in a client-queue.
    *
-   * @throws AdminException if this bridge server is running
+   * @throws AdminException if this cache server is running
    * @since GemFire 5.7
    */
   void setMaximumMessageCount(int maxMessageCount) throws AdminException;
@@ -240,67 +240,67 @@ public interface SystemMemberBridgeServer {
   /**
    * Sets the time (in seconds ) after which a message in the client queue will expire.
    *
-   * @throws AdminException if this bridge server is running
+   * @throws AdminException if this cache server is running
    * @since GemFire 5.7
    */
   void setMessageTimeToLive(int messageTimeToLive) throws AdminException;
 
   /**
-   * Sets the list of server groups this bridge server will belong to. By default bridge servers
-   * belong to the default global server group which all bridge servers always belong to.
+   * Sets the list of server groups this cache server will belong to. By default cache servers
+   * belong to the default global server group which all cache servers always belong to.
    *
    * @param groups possibly empty array of <code>String</code> where each string is a server groups
-   *        that this bridge server will be a member of.
-   * @throws AdminException if this bridge server is running
+   *        that this cache server will be a member of.
+   * @throws AdminException if this cache server is running
    * @since GemFire 5.7
    */
   void setGroups(String[] groups) throws AdminException;
 
   /**
-   * Returns the list of server groups that this bridge server belongs to.
+   * Returns the list of server groups that this cache server belongs to.
    *
    * @return a possibly empty array of <code>String</code>s where each string is a server group.
-   *         Modifying this array will not change the server groups that this bridge server belongs
+   *         Modifying this array will not change the server groups that this cache server belongs
    *         to.
    * @since GemFire 5.7
    */
   String[] getGroups();
 
   /**
-   * Get a description of the load probe for this bridge server. {@link ServerLoadProbe} for details
+   * Get a description of the load probe for this cache server. See {@link ServerLoadProbe} for details
    * on the load probe.
    *
-   * @return the load probe used by this bridge server.
+   * @return the load probe used by this cache server.
    * @since GemFire 5.7
    */
   String getLoadProbe();
 
   /**
-   * Set the load probe for this bridge server. See {@link ServerLoadProbe} for details on how to
+   * Set the load probe for this cache server. See {@link ServerLoadProbe} for details on how to
    * implement a load probe.
    *
    * The load probe should implement DataSerializable if it is used with this interface, because it
    * will be sent to the remote VM.
    *
-   * @param loadProbe the load probe to use for this bridge server.
-   * @throws AdminException if the bridge server is running
+   * @param loadProbe the load probe to use for this cache server.
+   * @throws AdminException if the cache server is running
    * @since GemFire 5.7
    */
   void setLoadProbe(ServerLoadProbe loadProbe) throws AdminException;
 
   /**
-   * Get the frequency in milliseconds to poll the load probe on this bridge server.
+   * Get the frequency in milliseconds to poll the load probe on this cache server.
    *
    * @return the frequency in milliseconds that we will poll the load probe.
    */
   long getLoadPollInterval();
 
   /**
-   * Set the frequency in milliseconds to poll the load probe on this bridge server
+   * Set the frequency in milliseconds to poll the load probe on this cache server
    *
    * @param loadPollInterval the frequency in milliseconds to poll the load probe. Must be greater
    *        than 0.
-   * @throws AdminException if the bridge server is running
+   * @throws AdminException if the cache server is running
    */
   void setLoadPollInterval(long loadPollInterval) throws AdminException;
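
Most setters in this interface share the same documented constraint: configuration cannot change
while the cache server is running. A minimal sketch of that guard, modeled on the checkRunning()
helper in SystemMemberBridgeServerImpl later in this patch (the running flag and the setPort body
here are hypothetical):

    import org.apache.geode.admin.AdminException;

    class RunningGuardSketch {
      private boolean running; // hypothetical state flag for illustration

      private void checkRunning() throws AdminException {
        if (running) {
          throw new AdminException("Cannot change the configuration of a running cache server.");
        }
      }

      void setPort(int port) throws AdminException {
        checkRunning(); // every configuration setter checks this first
        // ...then record the new port in the server's configuration (omitted)
      }
    }
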
 
diff --git a/geode-core/src/main/java/org/apache/geode/admin/internal/AdminDistributedSystemImpl.java b/geode-core/src/main/java/org/apache/geode/admin/internal/AdminDistributedSystemImpl.java
index f622ab5..c2b41bf 100755
--- a/geode-core/src/main/java/org/apache/geode/admin/internal/AdminDistributedSystemImpl.java
+++ b/geode-core/src/main/java/org/apache/geode/admin/internal/AdminDistributedSystemImpl.java
@@ -504,14 +504,14 @@ public class AdminDistributedSystemImpl implements org.apache.geode.admin.AdminD
       try {
         if (!locs[i].waitToStart(TIMEOUT_MS)) {
           throw new AdminException(
-              String.format("%s  did not start after  %s  ms",
+              String.format("%s did not start after %s ms",
                   new Object[] {locs[i], Integer.valueOf(TIMEOUT_MS)}));
         }
 
       } catch (InterruptedException ex) {
         Thread.currentThread().interrupt();
         throw new AdminException(
-            String.format("Interrupted while waiting for  %s  to start.",
+            String.format("Interrupted while waiting for %s to start.",
                 locs[i]),
             ex);
       }
@@ -525,14 +525,14 @@ public class AdminDistributedSystemImpl implements org.apache.geode.admin.AdminD
       try {
         if (!servers[i].waitToStart(TIMEOUT_MS)) {
           throw new AdminException(
-              String.format("%s  did not start after  %s  ms",
+              String.format("%s did not start after %s ms",
                   new Object[] {servers[i], Integer.valueOf(TIMEOUT_MS)}));
         }
 
       } catch (InterruptedException ex) {
         Thread.currentThread().interrupt();
         throw new AdminException(
-            String.format("Interrupted while waiting for  %s  to start.",
+            String.format("Interrupted while waiting for %s to start.",
                 servers[i]),
             ex);
       }
@@ -558,14 +558,14 @@ public class AdminDistributedSystemImpl implements org.apache.geode.admin.AdminD
       try {
         if (!servers[i].waitToStop(timeout * 1000)) {
           throw new AdminException(
-              String.format("%s  did not stop after  %s  seconds.",
+              String.format("%s did not stop after %s seconds.",
                   new Object[] {servers[i], Long.valueOf(timeout)}));
         }
 
       } catch (InterruptedException ex) {
         Thread.currentThread().interrupt();
         throw new AdminException(
-            String.format("Interrupted while waiting for  %s  to stop.",
+            String.format("Interrupted while waiting for %s to stop.",
                 servers[i]),
             ex);
       }
@@ -579,14 +579,14 @@ public class AdminDistributedSystemImpl implements org.apache.geode.admin.AdminD
       try {
         if (!locs[i].waitToStop(timeout * 1000)) {
           throw new AdminException(
-              String.format("%s  did not stop after  %s  seconds.",
+              String.format("%s did not stop after %s seconds.",
                   new Object[] {locs[i], Long.valueOf(timeout)}));
         }
 
       } catch (InterruptedException ex) {
         Thread.currentThread().interrupt();
         throw new AdminException(
-            String.format("Interrupted while waiting for  %s  to stop.",
+            String.format("Interrupted while waiting for %s to stop.",
                 locs[i]),
             ex);
       }
diff --git a/geode-core/src/main/java/org/apache/geode/admin/internal/ConfigurationParameterImpl.java b/geode-core/src/main/java/org/apache/geode/admin/internal/ConfigurationParameterImpl.java
index 564cfb6..a178053 100755
--- a/geode-core/src/main/java/org/apache/geode/admin/internal/ConfigurationParameterImpl.java
+++ b/geode-core/src/main/java/org/apache/geode/admin/internal/ConfigurationParameterImpl.java
@@ -155,17 +155,17 @@ public class ConfigurationParameterImpl implements org.apache.geode.admin.Config
   public void setValue(Object value) throws UnmodifiableConfigurationException {
     if (!isModifiable()) {
       throw new UnmodifiableConfigurationException(
-          String.format("%s  is not a modifiable configuration parameter",
+          String.format("%s is not a modifiable configuration parameter",
               getName()));
     }
     if (value == null) {
       throw new IllegalArgumentException(
-          String.format("Unable to set  %s  to null value",
+          String.format("Unable to set %s to null value",
               getName()));
     }
     if (!getValueType().equals(value.getClass())) {
       throw new IllegalArgumentException(
-          String.format("Unable to set type  %s  with type  %s",
+          String.format("Unable to set type %s with type %s",
 
               new Object[] {getValueType().getName(), value.getClass().getName()}));
     }
@@ -205,7 +205,7 @@ public class ConfigurationParameterImpl implements org.apache.geode.admin.Config
   protected void setValueFromString(String newValue) {
     if (newValue == null) {
       throw new IllegalArgumentException(
-          String.format("Unable to set  %s  to null value",
+          String.format("Unable to set %s to null value",
               getName()));
     }
 
diff --git a/geode-core/src/main/java/org/apache/geode/admin/internal/DistributedSystemConfigImpl.java b/geode-core/src/main/java/org/apache/geode/admin/internal/DistributedSystemConfigImpl.java
index 35519c3..224a252 100755
--- a/geode-core/src/main/java/org/apache/geode/admin/internal/DistributedSystemConfigImpl.java
+++ b/geode-core/src/main/java/org/apache/geode/admin/internal/DistributedSystemConfigImpl.java
@@ -597,7 +597,7 @@ public class DistributedSystemConfigImpl implements DistributedSystemConfig {
   private void basicSetBindAddress(String bindAddress) {
     if (!validateBindAddress(bindAddress)) {
       throw new IllegalArgumentException(
-          String.format("Invalid bind address:  %s",
+          String.format("Invalid bind address: %s",
               bindAddress));
     }
     this.bindAddress = bindAddress;
@@ -606,7 +606,7 @@ public class DistributedSystemConfigImpl implements DistributedSystemConfig {
   private void basicSetServerBindAddress(String bindAddress) {
     if (!validateBindAddress(bindAddress)) {
       throw new IllegalArgumentException(
-          String.format("Invalid bind address:  %s",
+          String.format("Invalid bind address: %s",
               bindAddress));
     }
     this.serverBindAddress = bindAddress;
@@ -1001,7 +1001,7 @@ public class DistributedSystemConfigImpl implements DistributedSystemConfig {
   public void validate() {
     if (this.getMcastPort() < MIN_MCAST_PORT || this.getMcastPort() > MAX_MCAST_PORT) {
       throw new IllegalArgumentException(
-          String.format("mcastPort must be an integer inclusively between  %s  and  %s",
+          String.format("mcastPort must be an integer inclusively between %s and %s",
 
               new Object[] {Integer.valueOf(MIN_MCAST_PORT), Integer.valueOf(MAX_MCAST_PORT)}));
     }
@@ -1011,7 +1011,7 @@ public class DistributedSystemConfigImpl implements DistributedSystemConfig {
     if (this.logFileSizeLimit < MIN_LOG_FILE_SIZE_LIMIT
         || this.logFileSizeLimit > MAX_LOG_FILE_SIZE_LIMIT) {
       throw new IllegalArgumentException(
-          String.format("LogFileSizeLimit must be an integer between  %s  and  %s",
+          String.format("LogFileSizeLimit must be an integer between %s and %s",
               new Object[] {Integer.valueOf(MIN_LOG_FILE_SIZE_LIMIT),
                   Integer.valueOf(MAX_LOG_FILE_SIZE_LIMIT)}));
     }
@@ -1019,7 +1019,7 @@ public class DistributedSystemConfigImpl implements DistributedSystemConfig {
     if (this.logDiskSpaceLimit < MIN_LOG_DISK_SPACE_LIMIT
         || this.logDiskSpaceLimit > MAX_LOG_DISK_SPACE_LIMIT) {
       throw new IllegalArgumentException(
-          String.format("LogDiskSpaceLimit must be an integer between  %s  and  %s",
+          String.format("LogDiskSpaceLimit must be an integer between %s and %s",
               new Object[] {Integer.valueOf(MIN_LOG_DISK_SPACE_LIMIT),
                   Integer.valueOf(MAX_LOG_DISK_SPACE_LIMIT)}));
     }
diff --git a/geode-core/src/main/java/org/apache/geode/admin/internal/DistributionLocatorConfigImpl.java b/geode-core/src/main/java/org/apache/geode/admin/internal/DistributionLocatorConfigImpl.java
index 32dec35..0802ecc 100644
--- a/geode-core/src/main/java/org/apache/geode/admin/internal/DistributionLocatorConfigImpl.java
+++ b/geode-core/src/main/java/org/apache/geode/admin/internal/DistributionLocatorConfigImpl.java
@@ -154,7 +154,7 @@ public class DistributionLocatorConfigImpl extends ManagedEntityConfigImpl
 
     if (port < MIN_PORT || port > MAX_PORT) {
       throw new IllegalArgumentException(
-          String.format("Port ( %s ) must be an integer between  %s  and  %s",
+          String.format("Port ( %s ) must be an integer between %s and %s",
               new Object[] {Integer.valueOf(port), Integer.valueOf(MIN_PORT),
                   Integer.valueOf(MAX_PORT)}));
     }
diff --git a/geode-core/src/main/java/org/apache/geode/admin/internal/ManagedEntityConfigXml.java b/geode-core/src/main/java/org/apache/geode/admin/internal/ManagedEntityConfigXml.java
index 519d2e8..9a49ca2 100644
--- a/geode-core/src/main/java/org/apache/geode/admin/internal/ManagedEntityConfigXml.java
+++ b/geode-core/src/main/java/org/apache/geode/admin/internal/ManagedEntityConfigXml.java
@@ -131,7 +131,7 @@ abstract class ManagedEntityConfigXml implements EntityResolver, ErrorHandler {
         result = new InputSource(stream);
       } else {
         throw new SAXNotRecognizedException(
-            String.format("DTD not found:  %s", location));
+            String.format("DTD not found: %s", location));
       }
     }
 
diff --git a/geode-core/src/main/java/org/apache/geode/admin/internal/SystemMemberBridgeServerImpl.java b/geode-core/src/main/java/org/apache/geode/admin/internal/SystemMemberBridgeServerImpl.java
index dbfbb3d..f4ff003 100644
--- a/geode-core/src/main/java/org/apache/geode/admin/internal/SystemMemberBridgeServerImpl.java
+++ b/geode-core/src/main/java/org/apache/geode/admin/internal/SystemMemberBridgeServerImpl.java
@@ -33,13 +33,13 @@ import org.apache.geode.internal.admin.GemFireVM;
 public class SystemMemberBridgeServerImpl
     implements SystemMemberCacheServer, SystemMemberBridgeServer {
 
-  /** The VM in which the bridge server resides */
+  /** The VM in which the cache server resides */
   private final GemFireVM vm;
 
-  /** The cache server by this bridge server */
+  /** The cache served by this cache server */
   private CacheInfo cache;
 
-  /** Information about the bridge server */
+  /** Information about the cache server */
   private AdminBridgeServer bridgeInfo;
 
   ///////////////////// Constructors /////////////////////
@@ -60,12 +60,12 @@ public class SystemMemberBridgeServerImpl
   //////////////////// Instance Methods ////////////////////
 
   /**
-   * Throws an <code>AdminException</code> if this bridge server is running.
+   * Throws an <code>AdminException</code> if this cache server is running.
    */
   private void checkRunning() throws AdminException {
     if (this.isRunning()) {
       throw new AdminException(
-          "Cannot change the configuration of a running bridge server.");
+          "Cannot change the configuration of a running cache server.");
     }
   }
 
@@ -91,7 +91,7 @@ public class SystemMemberBridgeServerImpl
   }
 
   /**
-   * Returns the VM-unique id of this bridge server
+   * Returns the VM-unique id of this cache server
    */
   protected int getBridgeId() {
     return this.bridgeInfo.getId();
diff --git a/geode-core/src/main/java/org/apache/geode/admin/internal/SystemMemberCacheImpl.java b/geode-core/src/main/java/org/apache/geode/admin/internal/SystemMemberCacheImpl.java
index ea2b69a..aece52b 100644
--- a/geode-core/src/main/java/org/apache/geode/admin/internal/SystemMemberCacheImpl.java
+++ b/geode-core/src/main/java/org/apache/geode/admin/internal/SystemMemberCacheImpl.java
@@ -49,7 +49,7 @@ public class SystemMemberCacheImpl implements SystemMemberCache {
   protected CacheInfo info;
   protected Statistic[] statistics;
 
-  /** Maps the id of a bridge server to its SystemMemberBridgeServer */
+  /** Maps the id of a cache server to its SystemMemberBridgeServer */
   private ObjIdMap bridgeServers = new ObjIdMap();
 
   // constructors
@@ -58,7 +58,7 @@ public class SystemMemberCacheImpl implements SystemMemberCache {
     this.info = vm.getCacheInfo();
     if (this.info == null) {
       throw new CacheDoesNotExistException(
-          String.format("The VM  %s  does not currently have a cache.",
+          String.format("The VM %s does not currently have a cache.",
               vm.getId()));
     }
     initStats();
diff --git a/geode-core/src/main/java/org/apache/geode/admin/jmx/internal/AgentConfigImpl.java b/geode-core/src/main/java/org/apache/geode/admin/jmx/internal/AgentConfigImpl.java
index da41066..ba447c9 100644
--- a/geode-core/src/main/java/org/apache/geode/admin/jmx/internal/AgentConfigImpl.java
+++ b/geode-core/src/main/java/org/apache/geode/admin/jmx/internal/AgentConfigImpl.java
@@ -496,7 +496,7 @@ public class AgentConfigImpl extends DistributedSystemConfigImpl implements Agen
       }
     } else {
       throw new IllegalArgumentException(
-          String.format("Specified properties file does not exist:  %s",
+          String.format("Specified properties file does not exist: %s",
               propFile));
     }
 
@@ -1209,7 +1209,7 @@ public class AgentConfigImpl extends DistributedSystemConfigImpl implements Agen
       String description = _getPropertyDescription(prop);
       if (description == null) {
         throw new IllegalArgumentException(
-            String.format("Unknown config property:  %s", prop));
+            String.format("Unknown config property: %s", prop));
 
       } else {
         return description;
diff --git a/geode-core/src/main/java/org/apache/geode/admin/jmx/internal/AgentImpl.java b/geode-core/src/main/java/org/apache/geode/admin/jmx/internal/AgentImpl.java
index 8fdc1f1..0356f88 100644
--- a/geode-core/src/main/java/org/apache/geode/admin/jmx/internal/AgentImpl.java
+++ b/geode-core/src/main/java/org/apache/geode/admin/jmx/internal/AgentImpl.java
@@ -194,7 +194,7 @@ public class AgentImpl implements org.apache.geode.admin.jmx.Agent,
     try {
       this.objectName = new ObjectName(this.mbeanName);
     } catch (MalformedObjectNameException ex) {
-      String s = String.format("While creating ObjectName:  %s",
+      String s = String.format("While creating ObjectName: %s",
           new Object[] {this.mbeanName});
       throw new AdminException(s, ex);
     }
@@ -316,7 +316,7 @@ public class AgentImpl implements org.apache.geode.admin.jmx.Agent,
         logger.error(autoConnectFailed, ex.getMessage());
         this.stop();
         throw new StartupException(new AdminException(
-            String.format("auto connect failed:  %s", ex.getMessage()), ex));
+            String.format("auto connect failed: %s", ex.getMessage()), ex));
       }
     } // getAutoConnect
 
@@ -539,13 +539,13 @@ public class AgentImpl implements org.apache.geode.admin.jmx.Agent,
     File f = (new File(value)).getAbsoluteFile();
     if (f.isDirectory()) {
       throw new IllegalArgumentException(
-          String.format("The file  %s  is a directory.", f));
+          String.format("The file %s is a directory.", f));
     }
     File parent = f.getParentFile();
     if (parent != null) {
       if (!parent.isDirectory()) {
         throw new IllegalArgumentException(
-            String.format("The directory  %s  does not exist.", parent));
+            String.format("The directory %s does not exist.", parent));
       }
     }
     this.propertyFile = f.getPath();
@@ -1254,7 +1254,7 @@ public class AgentImpl implements org.apache.geode.admin.jmx.Agent,
       // is still usable:
       SystemFailure.checkFailure();
       logger.error("Failed to start SnmpAdaptor:  {}", t.getMessage());
-      throw new StartupException(String.format("Failed to start SnmpAdaptor:  %s",
+      throw new StartupException(String.format("Failed to start SnmpAdaptor: %s",
           t.getMessage()), t);
     }
   }
@@ -1335,7 +1335,7 @@ public class AgentImpl implements org.apache.geode.admin.jmx.Agent,
       // is still usable:
       SystemFailure.checkFailure();
       logger.error("Failed to start HttpAdaptor:  {}", t.getMessage());
-      throw new StartupException(String.format("Failed to start HttpAdaptor:  %s",
+      throw new StartupException(String.format("Failed to start HttpAdaptor: %s",
           t.getMessage()), t);
     }
   }
diff --git a/geode-core/src/main/java/org/apache/geode/admin/jmx/internal/AgentLauncher.java b/geode-core/src/main/java/org/apache/geode/admin/jmx/internal/AgentLauncher.java
index 63396ff..899dc75 100644
--- a/geode-core/src/main/java/org/apache/geode/admin/jmx/internal/AgentLauncher.java
+++ b/geode-core/src/main/java/org/apache/geode/admin/jmx/internal/AgentLauncher.java
@@ -135,7 +135,7 @@ public class AgentLauncher {
     Properties props = AgentConfigImpl.getDefaultValuesForAllProperties();
 
     out.println("\n");
-    out.println("Agent configuration properties".toString());
+    out.println("Agent configuration properties");
 
     SortedMap<String, String> map = new TreeMap<String, String>();
 
@@ -148,8 +148,7 @@ public class AgentLauncher {
       }
 
       map.put(prop,
-          AgentConfigImpl.getPropertyDescription(prop) + " ("
-              + "Default" + "  \""
+          AgentConfigImpl.getPropertyDescription(prop) + " (Default  \""
               + props.getProperty(prop) + "\")");
     }
 
@@ -483,7 +482,7 @@ public class AgentLauncher {
         processDirOption(options, arg.substring("-dir=".length()));
       } else {
         throw new Exception(
-            String.format("Unknown argument:  %s", arg));
+            String.format("Unknown argument: %s", arg));
       }
     }
 
@@ -753,7 +752,7 @@ public class AgentLauncher {
 
     if (!workingDirectory.exists()) {
       throw new FileNotFoundException(
-          String.format("The input working directory does not exist:  %s",
+          String.format("The input working directory does not exist: %s",
               dirValue));
     }
 
@@ -777,19 +776,19 @@ public class AgentLauncher {
     out.println("Starts the GemFire JMX Agent");
     out.println("\t"
         + "<vmarg> a VM-option passed to the agent's VM, example -J-Xmx1024M for a 1 Gb heap");
-    out.println("\t" + "<dir> Directory in which agent runs, default is the current directory");
-    out.println("\t" + "<prop> A configuration property/value passed to the agent");
-    out.println("\t" + "(see help config for more details)");
+    out.println("\t<dir> Directory in which agent runs, default is the current directory");
+    out.println("\t<prop> A configuration property/value passed to the agent");
+    out.println("\t(see help config for more details)");
     out.println();
 
     out.println("agent stop [-dir=<dir>]");
     out.println("Stops a GemFire JMX Agent");
-    out.println("\t" + "<dir> Directory in which agent runs, default is the current directory");
+    out.println("\t<dir> Directory in which agent runs, default is the current directory");
     out.println("");
     out.println("agent status [-dir=<dir>]");
     out.println(
         "Reports the status and the process id of a GemFire JMX Agent");
-    out.println("\t" + "<dir> Directory in which agent runs, default is the current directory");
+    out.println("\t<dir> Directory in which agent runs, default is the current directory");
     out.println();
 
     ExitCode.FATAL.doSystemExit();
diff --git a/geode-core/src/main/java/org/apache/geode/admin/jmx/internal/GenerateMBeanHTML.java b/geode-core/src/main/java/org/apache/geode/admin/jmx/internal/GenerateMBeanHTML.java
index 9ee6aa6..d361428 100644
--- a/geode-core/src/main/java/org/apache/geode/admin/jmx/internal/GenerateMBeanHTML.java
+++ b/geode-core/src/main/java/org/apache/geode/admin/jmx/internal/GenerateMBeanHTML.java
@@ -148,7 +148,7 @@ public class GenerateMBeanHTML extends DefaultHandler {
         result = new InputSource(stream);
       } else {
         throw new SAXNotRecognizedException(
-            String.format("DTD not found:  %s", location));
+            String.format("DTD not found: %s", location));
       }
     }
 
diff --git a/geode-core/src/main/java/org/apache/geode/admin/jmx/internal/MBeanUtil.java b/geode-core/src/main/java/org/apache/geode/admin/jmx/internal/MBeanUtil.java
index 908ee54..b4b59fe 100755
--- a/geode-core/src/main/java/org/apache/geode/admin/jmx/internal/MBeanUtil.java
+++ b/geode-core/src/main/java/org/apache/geode/admin/jmx/internal/MBeanUtil.java
@@ -698,7 +698,7 @@ public class MBeanUtil {
               if (entry == null)
                 return;
               if (!(entry instanceof ManagedResource)) {
-                throw new ClassCastException(String.format("%s  is not a ManagedResource",
+                throw new ClassCastException(String.format("%s is not a ManagedResource",
                     new Object[] {entry.getClass().getName()}));
               }
               ManagedResource resource = (ManagedResource) entry;
diff --git a/geode-core/src/main/java/org/apache/geode/admin/jmx/internal/MX4JModelMBean.java b/geode-core/src/main/java/org/apache/geode/admin/jmx/internal/MX4JModelMBean.java
index f897a98..d3f70fb 100755
--- a/geode-core/src/main/java/org/apache/geode/admin/jmx/internal/MX4JModelMBean.java
+++ b/geode-core/src/main/java/org/apache/geode/admin/jmx/internal/MX4JModelMBean.java
@@ -358,13 +358,13 @@ public class MX4JModelMBean implements ModelMBean, MBeanRegistration, Notificati
     ModelMBeanAttributeInfo attrInfo = info.getAttribute(attribute);
     if (attrInfo == null)
       throw new AttributeNotFoundException(
-          String.format("Cannot find ModelMBeanAttributeInfo for attribute  %s",
+          String.format("Cannot find ModelMBeanAttributeInfo for attribute %s",
               attribute));
     if (logger.isEnabledFor(Logger.DEBUG))
       logger.debug("Attribute info is: " + attrInfo);
     if (!attrInfo.isReadable())
       throw new AttributeNotFoundException(
-          String.format("Attribute  %s  is not readable", attribute));
+          String.format("Attribute %s is not readable", attribute));
 
     // This returns a clone of the mbean descriptor, we use it read only
     Descriptor mbeanDescriptor = info.getMBeanDescriptor();
@@ -378,7 +378,7 @@ public class MX4JModelMBean implements ModelMBean, MBeanRegistration, Notificati
     Descriptor attributeDescriptor = attrInfo.getDescriptor();
     if (attributeDescriptor == null)
       throw new AttributeNotFoundException(
-          String.format("Attribute descriptor for attribute  %s  cannot be null",
+          String.format("Attribute descriptor for attribute %s cannot be null",
               attribute));
     if (logger.isEnabledFor(Logger.DEBUG))
       logger.debug("Attribute descriptor is: " + attributeDescriptor);
@@ -514,14 +514,14 @@ public class MX4JModelMBean implements ModelMBean, MBeanRegistration, Notificati
     ModelMBeanAttributeInfo attrInfo = info.getAttribute(attrName);
     if (attrInfo == null)
       throw new AttributeNotFoundException(
-          String.format("Cannot find ModelMBeanAttributeInfo for attribute  %s",
+          String.format("Cannot find ModelMBeanAttributeInfo for attribute %s",
               attrName));
     if (logger.isEnabledFor(Logger.DEBUG))
       logger.debug("Attribute info is: " + attrInfo);
 
     if (!attrInfo.isWritable())
       throw new AttributeNotFoundException(
-          String.format("Attribute  %s  is not writable", attrName));
+          String.format("Attribute %s is not writable", attrName));
 
     // This returns a clone of the mbean descriptor, we use it read only
     Descriptor mbeanDescriptor = info.getMBeanDescriptor();
@@ -535,7 +535,7 @@ public class MX4JModelMBean implements ModelMBean, MBeanRegistration, Notificati
     Descriptor attributeDescriptor = attrInfo.getDescriptor();
     if (attributeDescriptor == null)
       throw new AttributeNotFoundException(
-          String.format("Attribute descriptor for attribute  %s  cannot be null",
+          String.format("Attribute descriptor for attribute %s cannot be null",
               attrName));
     if (logger.isEnabledFor(Logger.DEBUG))
       logger.debug("Attribute descriptor is: " + attributeDescriptor);
@@ -641,7 +641,7 @@ public class MX4JModelMBean implements ModelMBean, MBeanRegistration, Notificati
     ModelMBeanOperationInfo operInfo = info.getOperation(method);
     if (operInfo == null)
       throw new MBeanException(new ServiceNotFoundException(
-          String.format("Cannot find ModelMBeanOperationInfo for operation  %s",
+          String.format("Cannot find ModelMBeanOperationInfo for operation %s",
               method)));
     if (logger.isEnabledFor(Logger.DEBUG))
       logger.debug("Operation info is: " + operInfo);
@@ -650,12 +650,12 @@ public class MX4JModelMBean implements ModelMBean, MBeanRegistration, Notificati
     Descriptor operationDescriptor = operInfo.getDescriptor();
     if (operationDescriptor == null)
       throw new MBeanException(new ServiceNotFoundException(
-          String.format("Operation descriptor for operation  %s  cannot be null",
+          String.format("Operation descriptor for operation %s cannot be null",
               method)));
     String role = (String) operationDescriptor.getFieldValue("role");
     if (role == null || !role.equals("operation"))
       throw new MBeanException(new ServiceNotFoundException(
-          String.format("Operation descriptor field 'role' must be 'operation', not  %s",
+          String.format("Operation descriptor field 'role' must be 'operation', not %s",
               role)));
     if (logger.isEnabledFor(Logger.DEBUG))
       logger.debug("Operation descriptor is: " + operationDescriptor);
diff --git a/geode-core/src/main/java/org/apache/geode/admin/jmx/internal/SystemMemberCacheJmxImpl.java b/geode-core/src/main/java/org/apache/geode/admin/jmx/internal/SystemMemberCacheJmxImpl.java
index f57429f..1150a5f 100644
--- a/geode-core/src/main/java/org/apache/geode/admin/jmx/internal/SystemMemberCacheJmxImpl.java
+++ b/geode-core/src/main/java/org/apache/geode/admin/jmx/internal/SystemMemberCacheJmxImpl.java
@@ -309,7 +309,7 @@ public class SystemMemberCacheJmxImpl extends org.apache.geode.admin.internal.Sy
   }
 
   /**
-   * Returns the MBean <code>ObjectName</code>s for all bridge servers that serve this cache.
+   * Returns the MBean <code>ObjectName</code>s for all cache servers that serve this cache.
    *
    * @since GemFire 4.0
    * @deprecated as of 5.7
diff --git a/geode-core/src/main/java/org/apache/geode/cache/DataPolicy.java b/geode-core/src/main/java/org/apache/geode/cache/DataPolicy.java
index d8b793e..5ba6e53 100644
--- a/geode-core/src/main/java/org/apache/geode/cache/DataPolicy.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/DataPolicy.java
@@ -131,7 +131,7 @@ public class DataPolicy implements java.io.Serializable {
     }
     if (VALUES[ordinal] != null) {
       throw new IllegalArgumentException(
-          String.format("Ordinal %s is already defined by  %s",
+          String.format("Ordinal %s is already defined by %s",
               new Object[] {Integer.valueOf(ordinal), VALUES[ordinal]}));
     }
     this.name = name;
diff --git a/geode-core/src/main/java/org/apache/geode/cache/DiskWriteAttributesFactory.java b/geode-core/src/main/java/org/apache/geode/cache/DiskWriteAttributesFactory.java
index e1a0713..0ff80a9 100755
--- a/geode-core/src/main/java/org/apache/geode/cache/DiskWriteAttributesFactory.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/DiskWriteAttributesFactory.java
@@ -145,7 +145,7 @@ public class DiskWriteAttributesFactory implements java.io.Serializable {
     if (maxOplogSize < 0) {
       throw new IllegalArgumentException(
           String.format(
-              "Maximum Oplog size specified has to be a non-negative number and the value given  %s  is not acceptable",
+              "Maximum Oplog size specified has to be a non-negative number and the value given %s is not acceptable",
               Long.valueOf(maxOplogSize)));
     }
     this.props.setProperty(CacheXml.MAX_OPLOG_SIZE, String.valueOf(maxOplogSize));
@@ -168,7 +168,7 @@ public class DiskWriteAttributesFactory implements java.io.Serializable {
     if (maxOplogSize < 0) {
       throw new IllegalArgumentException(
           String.format(
-              "Maximum Oplog size specified has to be a non-negative number and the value given  %s  is not acceptable",
+              "Maximum Oplog size specified has to be a non-negative number and the value given %s is not acceptable",
               Integer.valueOf(maxOplogSize)));
     }
     long maxOplogSizeInBytes = convertToBytes(maxOplogSize);
@@ -200,7 +200,7 @@ public class DiskWriteAttributesFactory implements java.io.Serializable {
     if (timeInterval < 0) {
       throw new IllegalArgumentException(
           String.format(
-              "Time Interval specified has to be a non-negative number and the value given  %s  is not acceptable",
+              "Time Interval specified has to be a non-negative number and the value given %s is not acceptable",
               Long.valueOf(timeInterval)));
     }
 
diff --git a/geode-core/src/main/java/org/apache/geode/cache/LossAction.java b/geode-core/src/main/java/org/apache/geode/cache/LossAction.java
index 82f906a..873257c 100755
--- a/geode-core/src/main/java/org/apache/geode/cache/LossAction.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/LossAction.java
@@ -97,7 +97,7 @@ public class LossAction implements Serializable {
   public static LossAction fromName(String name) {
     if (name == null || name.length() == 0) {
       throw new IllegalArgumentException(
-          String.format("Invalid LossAction name:  %s", name));
+          String.format("Invalid LossAction name: %s", name));
     }
     for (int i = 0; i < PRIVATE_VALUES.length; i++) {
       if (name.equals(PRIVATE_VALUES[i].name)) {
@@ -105,7 +105,7 @@ public class LossAction implements Serializable {
       }
     }
     throw new IllegalArgumentException(
-        String.format("Invalid LossAction name:  %s", name));
+        String.format("Invalid LossAction name: %s", name));
   }
 
   /** Returns true if this is <code>NO_ACCESS</code>. */
diff --git a/geode-core/src/main/java/org/apache/geode/cache/RegionFactory.java b/geode-core/src/main/java/org/apache/geode/cache/RegionFactory.java
index 543e280..39be0f2 100644
--- a/geode-core/src/main/java/org/apache/geode/cache/RegionFactory.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/RegionFactory.java
@@ -109,7 +109,7 @@ public class RegionFactory<K, V> {
     this.cache = cache;
     RegionAttributes<K, V> ra = getCache().getRegionAttributes(regionAttributesId);
     if (ra == null) {
-      throw new IllegalStateException(String.format("No attributes associated with  %s",
+      throw new IllegalStateException(String.format("No attributes associated with %s",
           regionAttributesId));
     }
     this.attrsFactory = new AttributesFactory<K, V>(ra);
diff --git a/geode-core/src/main/java/org/apache/geode/cache/ResumptionAction.java b/geode-core/src/main/java/org/apache/geode/cache/ResumptionAction.java
index 1aae543..44307c1 100755
--- a/geode-core/src/main/java/org/apache/geode/cache/ResumptionAction.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/ResumptionAction.java
@@ -74,7 +74,7 @@ public class ResumptionAction implements java.io.Serializable {
   public static ResumptionAction fromName(String name) {
     if (name == null || name.length() == 0) {
       throw new IllegalArgumentException(
-          String.format("Invalid ResumptionAction name:  %s",
+          String.format("Invalid ResumptionAction name: %s",
               name));
     }
     for (int i = 0; i < PRIVATE_VALUES.length; i++) {
@@ -83,7 +83,7 @@ public class ResumptionAction implements java.io.Serializable {
       }
     }
     throw new IllegalArgumentException(
-        String.format("Invalid ResumptionAction name:  %s", name));
+        String.format("Invalid ResumptionAction name: %s", name));
   }
 
   /** Returns true if this is <code>NONE</code>. */
diff --git a/geode-core/src/main/java/org/apache/geode/cache/query/internal/AbstractCompiledValue.java b/geode-core/src/main/java/org/apache/geode/cache/query/internal/AbstractCompiledValue.java
index ae85b93..d7b0a51 100644
--- a/geode-core/src/main/java/org/apache/geode/cache/query/internal/AbstractCompiledValue.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/query/internal/AbstractCompiledValue.java
@@ -268,7 +268,7 @@ public abstract class AbstractCompiledValue implements CompiledValue, Filter, OQ
       CompiledValue v = (CompiledValue) itr.next();
       if (v == null) {
         throw new NullPointerException(
-            String.format("Got null as a child from  %s",
+            String.format("Got null as a child from %s",
                 this));
       }
       v.getRegionsInQuery(regionsInQuery, parameters);
diff --git a/geode-core/src/main/java/org/apache/geode/cache/query/internal/AttributeDescriptor.java b/geode-core/src/main/java/org/apache/geode/cache/query/internal/AttributeDescriptor.java
index bff7ae7..1b5ae6c 100644
--- a/geode-core/src/main/java/org/apache/geode/cache/query/internal/AttributeDescriptor.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/query/internal/AttributeDescriptor.java
@@ -154,7 +154,7 @@ public class AttributeDescriptor {
 
     if (m == null) {
       throw new NameNotFoundException(
-          String.format("No public attribute named ' %s ' was found in class  %s",
+          String.format("No public attribute named ' %s ' was found in class %s",
               new Object[] {_name, targetClass.getName()}));
     }
 
diff --git a/geode-core/src/main/java/org/apache/geode/cache/query/internal/CompiledIn.java b/geode-core/src/main/java/org/apache/geode/cache/query/internal/CompiledIn.java
index 0b72217..0619726 100644
--- a/geode-core/src/main/java/org/apache/geode/cache/query/internal/CompiledIn.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/query/internal/CompiledIn.java
@@ -127,7 +127,7 @@ public class CompiledIn extends AbstractCompiledValue implements Indexable {
 
     if (!evalColln.getClass().isArray()) {
       throw new TypeMismatchException(
-          String.format("Operand of IN cannot be interpreted as a Collection. Is instance of  %s",
+          String.format("Operand of IN cannot be interpreted as a Collection. Is instance of %s",
               evalColln.getClass().getName()));
     }
     if (evalColln.getClass().getComponentType().isPrimitive()) {
diff --git a/geode-core/src/main/java/org/apache/geode/cache/query/internal/CompiledIndexOperation.java b/geode-core/src/main/java/org/apache/geode/cache/query/internal/CompiledIndexOperation.java
index 17621b0..413d534 100644
--- a/geode-core/src/main/java/org/apache/geode/cache/query/internal/CompiledIndexOperation.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/query/internal/CompiledIndexOperation.java
@@ -142,7 +142,7 @@ public class CompiledIndexOperation extends AbstractCompiledValue implements Map
      * == null) { return null; } return this.evalRegionAsEntry? entry:entry.getValue(); }
      */
     throw new TypeMismatchException(
-        String.format("index expression not supported on objects of type  %s",
+        String.format("index expression not supported on objects of type %s",
             rcvr.getClass().getName()));
   }
 
diff --git a/geode-core/src/main/java/org/apache/geode/cache/query/internal/CompiledIteratorDef.java b/geode-core/src/main/java/org/apache/geode/cache/query/internal/CompiledIteratorDef.java
index 40599ac..81e12b4 100644
--- a/geode-core/src/main/java/org/apache/geode/cache/query/internal/CompiledIteratorDef.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/query/internal/CompiledIteratorDef.java
@@ -150,7 +150,7 @@ public class CompiledIteratorDef extends AbstractCompiledValue {
     if (typ != null) {
       if (!(typ instanceof CollectionType)) {
         throw new TypeMismatchException(
-            String.format("An iterator definition must be a collection type, not a  %s",
+            String.format("An iterator definition must be a collection type, not a %s",
                 typ));
       }
       if (typ instanceof MapType) { // we iterate over map entries
diff --git a/geode-core/src/main/java/org/apache/geode/cache/query/internal/CompiledNegation.java b/geode-core/src/main/java/org/apache/geode/cache/query/internal/CompiledNegation.java
index adac8da..65b22f7 100644
--- a/geode-core/src/main/java/org/apache/geode/cache/query/internal/CompiledNegation.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/query/internal/CompiledNegation.java
@@ -67,7 +67,7 @@ public class CompiledNegation extends AbstractCompiledValue {
     if (obj == null || obj == QueryService.UNDEFINED)
       return QueryService.UNDEFINED;
     throw new TypeMismatchException(
-        String.format("%s  cannot be negated", obj.getClass()));
+        String.format("%s cannot be negated", obj.getClass()));
   }
 
   @Override
diff --git a/geode-core/src/main/java/org/apache/geode/cache/query/internal/CompiledRegion.java b/geode-core/src/main/java/org/apache/geode/cache/query/internal/CompiledRegion.java
index 704a5a0..8471804 100644
--- a/geode-core/src/main/java/org/apache/geode/cache/query/internal/CompiledRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/query/internal/CompiledRegion.java
@@ -84,7 +84,7 @@ public class CompiledRegion extends AbstractCompiledValue {
         throw new CacheClosedException();
       }
       throw new RegionNotFoundException(
-          String.format("Region not found:  %s", this.regionPath));
+          String.format("Region not found: %s", this.regionPath));
     }
 
     if (context.isCqQueryContext()) {
diff --git a/geode-core/src/main/java/org/apache/geode/cache/query/internal/CompiledSelect.java b/geode-core/src/main/java/org/apache/geode/cache/query/internal/CompiledSelect.java
index 29d1bb4..b5a6467 100644
--- a/geode-core/src/main/java/org/apache/geode/cache/query/internal/CompiledSelect.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/query/internal/CompiledSelect.java
@@ -660,7 +660,7 @@ public class CompiledSelect extends AbstractCompiledValue {
         throw new CacheClosedException();
       }
       throw new RegionNotFoundException(
-          String.format("Region not found:  %s", regionPath));
+          String.format("Region not found: %s", regionPath));
     }
   }
 
diff --git a/geode-core/src/main/java/org/apache/geode/cache/query/internal/CompiledUnaryMinus.java b/geode-core/src/main/java/org/apache/geode/cache/query/internal/CompiledUnaryMinus.java
index 39dfe9d..21f6d14 100644
--- a/geode-core/src/main/java/org/apache/geode/cache/query/internal/CompiledUnaryMinus.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/query/internal/CompiledUnaryMinus.java
@@ -71,7 +71,7 @@ public class CompiledUnaryMinus extends AbstractCompiledValue {
         return Short.valueOf((short) (((Short) obj).shortValue() * -1));
     } else if (obj == null || obj == QueryService.UNDEFINED)
       return QueryService.UNDEFINED;
-    throw new TypeMismatchException(String.format("%s  cannot be unary minus",
+    throw new TypeMismatchException(String.format("%s cannot be unary minus",
         obj.getClass()));
   }
 
diff --git a/geode-core/src/main/java/org/apache/geode/cache/query/internal/DefaultQuery.java b/geode-core/src/main/java/org/apache/geode/cache/query/internal/DefaultQuery.java
index 1cb4fb0..a02b7aa 100644
--- a/geode-core/src/main/java/org/apache/geode/cache/query/internal/DefaultQuery.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/query/internal/DefaultQuery.java
@@ -480,7 +480,7 @@ public class DefaultQuery implements Query {
       if (rgn == null) {
         this.cache.getCancelCriterion().checkCancelInProgress(null);
         throw new RegionNotFoundException(
-            String.format("Region not found:  %s", regionPath));
+            String.format("Region not found: %s", regionPath));
       }
       if (rgn instanceof QueryExecutor) {
         ((PartitionedRegion) rgn).checkPROffline();
diff --git a/geode-core/src/main/java/org/apache/geode/cache/query/internal/DefaultQueryService.java b/geode-core/src/main/java/org/apache/geode/cache/query/internal/DefaultQueryService.java
index ea49b72..b5d85bb 100644
--- a/geode-core/src/main/java/org/apache/geode/cache/query/internal/DefaultQueryService.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/query/internal/DefaultQueryService.java
@@ -285,13 +285,13 @@ public class DefaultQueryService implements InternalQueryService {
     } else {
       throw new RegionNotFoundException(
           String.format(
-              "DefaultQueryService::createIndex:First Iterator of Index >From Clause does not evaluate to a Region Path. The from clause used for Index creation is  %s",
+              "DefaultQueryService::createIndex:First Iterator of Index >From Clause does not evaluate to a Region Path. The from clause used for Index creation is %s",
               fromClause));
     }
     Region region = cache.getRegion(regionPath);
     if (region == null) {
       throw new RegionNotFoundException(
-          String.format("Region ' %s ' not found: from  %s",
+          String.format("Region ' %s ' not found: from %s",
               new Object[] {regionPath, fromClause}));
     }
     return region;
@@ -442,7 +442,7 @@ public class DefaultQueryService implements InternalQueryService {
       try {
         ((PartitionedRegion) region).removeIndex(index, false);
       } catch (ForceReattemptException ex) {
-        logger.info(String.format("Exception removing index :  %s", ex));
+        logger.info(String.format("Exception removing index : %s", ex));
       }
       return;
     }
@@ -489,7 +489,7 @@ public class DefaultQueryService implements InternalQueryService {
         ((PartitionedRegion) region).removeIndexes(false);
       } catch (ForceReattemptException ex) {
         // will have to throw a proper exception relating to remove index.
-        logger.info(String.format("Exception removing index :  %s", ex));
+        logger.info(String.format("Exception removing index : %s", ex));
       }
     }
     IndexManager indexManager = IndexUtils.getIndexManager(cache, region, false);
diff --git a/geode-core/src/main/java/org/apache/geode/cache/query/internal/ExecutionContext.java b/geode-core/src/main/java/org/apache/geode/cache/query/internal/ExecutionContext.java
index 2deca17..3385530 100644
--- a/geode-core/src/main/java/org/apache/geode/cache/query/internal/ExecutionContext.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/query/internal/ExecutionContext.java
@@ -351,7 +351,7 @@ public class ExecutionContext {
       if (mustBeMethod)
         throw new AmbiguousNameException(
             String.format(
-                "Method named ' %s ' with  %s  arguments is ambiguous because it can apply to more than one variable in scope.",
+                "Method named ' %s ' with %s arguments is ambiguous because it can apply to more than one variable in scope.",
                 name, numArgs));
       throw new AmbiguousNameException(
           String.format(
diff --git a/geode-core/src/main/java/org/apache/geode/cache/query/internal/Functions.java b/geode-core/src/main/java/org/apache/geode/cache/query/internal/Functions.java
index 66d76d1..20a6383 100644
--- a/geode-core/src/main/java/org/apache/geode/cache/query/internal/Functions.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/query/internal/Functions.java
@@ -78,7 +78,7 @@ public class Functions {
 
     } catch (Exception ex) {
       throw new QueryInvalidException(
-          String.format("Malformed date format string as the format is  %s",
+          String.format("Malformed date format string as the format is %s",
               format),
           ex);
     }
@@ -192,7 +192,7 @@ public class Functions {
   private static void checkSingleton(int size) throws FunctionDomainException {
     if (size != 1)
       throw new FunctionDomainException(
-          String.format("element() applied to parameter of size  %s",
+          String.format("element() applied to parameter of size %s",
               Integer.valueOf(size)));
   }
 
diff --git a/geode-core/src/main/java/org/apache/geode/cache/query/internal/MethodDispatch.java b/geode-core/src/main/java/org/apache/geode/cache/query/internal/MethodDispatch.java
index d9febe3..c28acd8 100644
--- a/geode-core/src/main/java/org/apache/geode/cache/query/internal/MethodDispatch.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/query/internal/MethodDispatch.java
@@ -125,7 +125,7 @@ public class MethodDispatch {
     if (candidates.isEmpty()) {
       throw new NameNotFoundException(
           String.format(
-              "No applicable and accessible method named ' %s ' was found in class ' %s ' for the argument types  %s",
+              "No applicable and accessible method named ' %s ' was found in class ' %s ' for the argument types %s",
 
               new Object[] {_methodName, _targetClass.getName(), Arrays.asList(_argTypes)}));
     }
@@ -153,7 +153,7 @@ public class MethodDispatch {
     if (equalSpecificity(meth1, meth2, _argTypes))
       throw new AmbiguousNameException(
           String.format(
-              "Two or more maximally specific methods were found for the method named ' %s ' in class ' %s ' for the argument types:  %s",
+              "Two or more maximally specific methods were found for the method named ' %s ' in class ' %s ' for the argument types: %s",
               new Object[] {meth1.getName(), _targetClass.getName(),
                   Arrays.asList(_argTypes)}));
 
diff --git a/geode-core/src/main/java/org/apache/geode/cache/query/internal/QCompiler.java b/geode-core/src/main/java/org/apache/geode/cache/query/internal/QCompiler.java
index 1ccb8d0..245e7c8 100644
--- a/geode-core/src/main/java/org/apache/geode/cache/query/internal/QCompiler.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/query/internal/QCompiler.java
@@ -82,7 +82,7 @@ public class QCompiler implements OQLLexerTokenTypes {
     } catch (Exception ex) { // This is to make sure that we are wrapping any antlr exception with
                              // GemFire Exception.
       throw new QueryInvalidException(
-          String.format("Syntax error in query:  %s", ex.getMessage()),
+          String.format("Syntax error in query: %s", ex.getMessage()),
           ex);
     }
     Assert.assertTrue(stackSize() == 1, "stack size = " + stackSize());
@@ -103,7 +103,7 @@ public class QCompiler implements OQLLexerTokenTypes {
     } catch (Exception ex) { // This is to make sure that we are wrapping any antlr exception with
                              // GemFire Exception.
       throw new QueryInvalidException(
-          String.format("Syntax error in query:  %s", ex.getMessage()),
+          String.format("Syntax error in query: %s", ex.getMessage()),
           ex);
     }
     Assert.assertTrue(stackSize() == 1, "stack size = " + stackSize());
@@ -129,7 +129,7 @@ public class QCompiler implements OQLLexerTokenTypes {
     } catch (Exception ex) { // This is to make sure that we are wrapping any antlr exception with
                              // GemFire Exception.
       throw new QueryInvalidException(
-          String.format("Syntax error in query:  %s", ex.getMessage()),
+          String.format("Syntax error in query: %s", ex.getMessage()),
           ex);
     }
     Assert.assertTrue(stackSize() == 1, "stack size = " + stackSize() + ";stack=" + this.stack);
@@ -194,7 +194,7 @@ public class QCompiler implements OQLLexerTokenTypes {
     } catch (Exception ex) { // This is to make sure that we are wrapping any antlr exception with
                              // GemFire Exception.
       throw new QueryInvalidException(
-          String.format("Syntax error in query:  %s", ex.getMessage()),
+          String.format("Syntax error in query: %s", ex.getMessage()),
           ex);
     }
     Assert.assertTrue(stackSize() == 0, "stack size = " + stackSize() + ";stack=" + this.stack);
@@ -433,7 +433,7 @@ public class QCompiler implements OQLLexerTokenTypes {
       }
     } else {
       if (!this.isForIndexCompilation) {
-        throw new QueryInvalidException(String.format("Syntax error in query:  %s",
+        throw new QueryInvalidException(String.format("Syntax error in query: %s",
             "* use incorrect"));
       }
       push(new CompiledIndexOperation(rcvr, indexExpr));
@@ -691,7 +691,7 @@ public class QCompiler implements OQLLexerTokenTypes {
       resultClass = InternalDataSerializer.getCachedClass(typeName);
     } catch (ClassNotFoundException e) {
       throw new QueryInvalidException(
-          String.format("Type not found:  %s", typeName), e);
+          String.format("Type not found: %s", typeName), e);
     }
     if (logger.isTraceEnabled()) {
       logger.trace("QCompiler.resolveType= {}", resultClass.getName());
diff --git a/geode-core/src/main/java/org/apache/geode/cache/query/internal/StructBag.java b/geode-core/src/main/java/org/apache/geode/cache/query/internal/StructBag.java
index 1dfe67c..2458c5d 100644
--- a/geode-core/src/main/java/org/apache/geode/cache/query/internal/StructBag.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/query/internal/StructBag.java
@@ -76,7 +76,7 @@ public class StructBag extends ResultsBag implements StructFields {
       // throws ClassCastException if not Object[]
       // compute hash code based on all elements
       if (!(o instanceof Object[])) {
-        throw new ClassCastException(String.format("Expected an Object[], but actual is  %s",
+        throw new ClassCastException(String.format("Expected an Object[], but actual is %s",
             o.getClass().getName()));
       }
       Object[] oa = (Object[]) o;
diff --git a/geode-core/src/main/java/org/apache/geode/cache/query/internal/StructSet.java b/geode-core/src/main/java/org/apache/geode/cache/query/internal/StructSet.java
index 5d83238..95c8c3e 100644
--- a/geode-core/src/main/java/org/apache/geode/cache/query/internal/StructSet.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/query/internal/StructSet.java
@@ -194,7 +194,7 @@ public class StructSet /* extends ObjectOpenCustomHashSet */ implements Set, Sel
     StructImpl s = (StructImpl) obj;
     if (!s.getStructType().equals(this.structType)) {
       throw new IllegalArgumentException(
-          String.format("obj does not have the same StructType: required:  %s , actual:  %s",
+          String.format("obj does not have the same StructType: required: %s , actual: %s",
               new Object[] {this.structType, s.getStructType()}));
     }
     return addFieldValues(s.getFieldValues());
diff --git a/geode-core/src/main/java/org/apache/geode/cache/query/internal/index/IndexManager.java b/geode-core/src/main/java/org/apache/geode/cache/query/internal/index/IndexManager.java
index fe4eb84..0db2c47 100644
--- a/geode-core/src/main/java/org/apache/geode/cache/query/internal/index/IndexManager.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/query/internal/index/IndexManager.java
@@ -316,7 +316,7 @@ public class IndexManager {
         if (isOverFlowRegion()) {
           throw new UnsupportedOperationException(
               String.format(
-                  "The specified index conditions are not supported for regions which overflow to disk. The region involved is  %s",
+                  "The specified index conditions are not supported for regions which overflow to disk. The region involved is %s",
                   region.getFullPath()));
         }
         // OffHeap is not supported with range index.
diff --git a/geode-core/src/main/java/org/apache/geode/cache/query/internal/index/RangeIndex.java b/geode-core/src/main/java/org/apache/geode/cache/query/internal/index/RangeIndex.java
index 44df128..cef736b 100644
--- a/geode-core/src/main/java/org/apache/geode/cache/query/internal/index/RangeIndex.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/query/internal/index/RangeIndex.java
@@ -230,7 +230,7 @@ public class RangeIndex extends AbstractIndex {
           }
         }
       } catch (Exception ex) {
-        throw new IMQException(String.format("Could not add object of type  %s",
+        throw new IMQException(String.format("Could not add object of type %s",
             oldkeys.getClass().getName()), ex);
       }
 
@@ -280,7 +280,7 @@ public class RangeIndex extends AbstractIndex {
             }
           } while (retry);
         } catch (TypeMismatchException ex) {
-          throw new IMQException(String.format("Could not add object of type  %s",
+          throw new IMQException(String.format("Could not add object of type %s",
               key.getClass().getName()), ex);
         }
       } else {
@@ -328,7 +328,7 @@ public class RangeIndex extends AbstractIndex {
               }
             } while (retry);
           } catch (TypeMismatchException ex) {
-            throw new IMQException(String.format("Could not add object of type  %s",
+            throw new IMQException(String.format("Could not add object of type %s",
                 key.getClass().getName()), ex);
           }
         } // for loop for keys
@@ -482,7 +482,7 @@ public class RangeIndex extends AbstractIndex {
         this.entryToValuesMap.add(entry, newKey);
         this.internalIndexStats.incNumValues(1);
       } catch (TypeMismatchException ex) {
-        throw new IMQException(String.format("Could not add object of type  %s",
+        throw new IMQException(String.format("Could not add object of type %s",
             key.getClass().getName()), ex);
       }
     }
@@ -840,7 +840,7 @@ public class RangeIndex extends AbstractIndex {
         }
         default: {
           throw new IllegalArgumentException(
-              String.format("Operator,  %s", valueOf(operator)));
+              String.format("Operator, %s", valueOf(operator)));
         }
       } // end switch
     } catch (ClassCastException ex) {
diff --git a/geode-core/src/main/java/org/apache/geode/cache/query/internal/parse/ASTLiteral.java b/geode-core/src/main/java/org/apache/geode/cache/query/internal/parse/ASTLiteral.java
index 77c58c4..13b9b41 100644
--- a/geode-core/src/main/java/org/apache/geode/cache/query/internal/parse/ASTLiteral.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/query/internal/parse/ASTLiteral.java
@@ -75,7 +75,7 @@ public class ASTLiteral extends GemFireAST {
       return Integer.valueOf(s);
     } catch (NumberFormatException e) {
       throw new QueryInvalidException(
-          String.format("unable to parse integer:  %s", s), e);
+          String.format("unable to parse integer: %s", s), e);
     }
   }
 
@@ -89,7 +89,7 @@ public class ASTLiteral extends GemFireAST {
       return Long.valueOf(s);
     } catch (NumberFormatException e) {
       throw new QueryInvalidException(
-          String.format("Unable to parse float  %s", s));
+          String.format("Unable to parse float %s", s));
     }
   }
 
@@ -116,7 +116,7 @@ public class ASTLiteral extends GemFireAST {
       return Double.valueOf(s);
     } catch (NumberFormatException e) {
       throw new QueryInvalidException(
-          String.format("Unable to parse double  %s", s));
+          String.format("Unable to parse double %s", s));
     }
   }
 
@@ -165,7 +165,7 @@ public class ASTLiteral extends GemFireAST {
       return date;
     } catch (IllegalArgumentException e) {
       throw new QueryInvalidException(
-          String.format("Illegal format for DATE literal:  %s . Expected format is yyyy-mm-dd",
+          String.format("Illegal format for DATE literal: %s . Expected format is yyyy-mm-dd",
               s));
     }
   }
@@ -192,7 +192,7 @@ public class ASTLiteral extends GemFireAST {
       return time;
     } catch (IllegalArgumentException e) {
       throw new QueryInvalidException(
-          String.format("Illegal format for TIME literal:  %s . Expected format is hh:mm:ss",
+          String.format("Illegal format for TIME literal: %s . Expected format is hh:mm:ss",
               s));
     }
   }
@@ -245,7 +245,7 @@ public class ASTLiteral extends GemFireAST {
     } catch (IllegalArgumentException e) {
       throw new QueryInvalidException(
           String.format(
-              "Illegal format for TIMESTAMP literal:  %s . Expected format is yyyy-mm-dd hh:mm:ss.fffffffff",
+              "Illegal format for TIMESTAMP literal: %s . Expected format is yyyy-mm-dd hh:mm:ss.fffffffff",
               s));
     }
   }
diff --git a/geode-core/src/main/java/org/apache/geode/cache/query/internal/parse/ASTUnsupported.java b/geode-core/src/main/java/org/apache/geode/cache/query/internal/parse/ASTUnsupported.java
index 027e927..3203d52 100644
--- a/geode-core/src/main/java/org/apache/geode/cache/query/internal/parse/ASTUnsupported.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/query/internal/parse/ASTUnsupported.java
@@ -36,6 +36,6 @@ public class ASTUnsupported extends GemFireAST {
   @Override
   public void compile(QCompiler compiler) {
     throw new UnsupportedOperationException(
-        String.format("Unsupported feature:  %s", getText()));
+        String.format("Unsupported feature: %s", getText()));
   }
 }
diff --git a/geode-core/src/main/java/org/apache/geode/cache/query/internal/types/StructTypeImpl.java b/geode-core/src/main/java/org/apache/geode/cache/query/internal/types/StructTypeImpl.java
index 2c2a19c..be45d6b 100644
--- a/geode-core/src/main/java/org/apache/geode/cache/query/internal/types/StructTypeImpl.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/query/internal/types/StructTypeImpl.java
@@ -92,7 +92,7 @@ public class StructTypeImpl extends ObjectTypeImpl implements StructType {
       }
     }
     throw new IllegalArgumentException(
-        String.format("fieldName %s  not found", fieldName));
+        String.format("fieldName %s not found", fieldName));
   }
 
   @Override
diff --git a/geode-core/src/main/java/org/apache/geode/cache/query/internal/types/TypeUtils.java b/geode-core/src/main/java/org/apache/geode/cache/query/internal/types/TypeUtils.java
index f11e271..8592a47 100644
--- a/geode-core/src/main/java/org/apache/geode/cache/query/internal/types/TypeUtils.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/query/internal/types/TypeUtils.java
@@ -106,7 +106,7 @@ public class TypeUtils implements OQLLexerTokenTypes {
         case TOK_NE:
           return temporalResult != 0;
         default:
-          throw new IllegalArgumentException(String.format("Unknown operator:  %s",
+          throw new IllegalArgumentException(String.format("Unknown operator: %s",
               Integer.valueOf(comparator)));
       }
     }
@@ -194,7 +194,7 @@ public class TypeUtils implements OQLLexerTokenTypes {
     }
 
     if (!castClass.isInstance(castTarget)) {
-      throw new InternalGemFireError(String.format("expected instance of  %s  but was  %s",
+      throw new InternalGemFireError(String.format("expected instance of %s but was %s",
           new Object[] {castClass.getName(), castTarget.getClass().getName()}));
     }
 
diff --git a/geode-core/src/main/java/org/apache/geode/cache/server/internal/ConnectionCountProbe.java b/geode-core/src/main/java/org/apache/geode/cache/server/internal/ConnectionCountProbe.java
index 30c306d..4af00a0 100644
--- a/geode-core/src/main/java/org/apache/geode/cache/server/internal/ConnectionCountProbe.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/server/internal/ConnectionCountProbe.java
@@ -26,7 +26,7 @@ import org.apache.geode.cache.server.ServerMetrics;
 import org.apache.geode.internal.cache.xmlcache.Declarable2;
 
 /**
- * A load probe which returns load as a function of the number of connections to the bridge server.
+ * A load probe which returns load as a function of the number of connections to the cache server.
  *
  * The Load object returned by this probe reports the connection load as the number of connections
  * to this server divided by the max connections for this server. This means that servers with a
@@ -34,7 +34,7 @@ import org.apache.geode.internal.cache.xmlcache.Declarable2;
  * The load therefore is a number between 0 and 1, where 0 means there are no connections, and 1
  * means the server is at max connections.
  *
- * The queue load is reported simply as the number of queues hosted by this bridge server.
+ * The queue load is reported simply as the number of queues hosted by this cache server.
  *
  *
  * @since GemFire 5.7
@@ -45,7 +45,7 @@ public class ConnectionCountProbe extends ServerLoadProbeAdapter
   private static final long serialVersionUID = -5072528455996471323L;
 
   /**
-   * Get a loads object representing the number of connections to this bridge server
+   * Get a loads object representing the number of connections to this cache server
    */
   public ServerLoad getLoad(ServerMetrics metrics) {
     float load = metrics.getConnectionCount() / (float) metrics.getMaxConnections();
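The javadoc above spells out the load formula that the line just before this computes. As a rough, self-contained sketch with assumed counts (not the actual ServerMetrics API, and the class name is invented for the example), the connection load is simply the current connection count divided by the configured maximum, yielding a value between 0 and 1:

    public class ConnectionLoadSketch {
      public static void main(String[] args) {
        int connectionCount = 150; // assumed: current client connections to this server
        int maxConnections = 800;  // assumed: the server's max-connections setting
        // 0 means no connections; 1 means the server is at its connection limit.
        float connectionLoad = connectionCount / (float) maxConnections;
        System.out.println("connection load = " + connectionLoad); // prints 0.1875
      }
    }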
diff --git a/geode-core/src/main/java/org/apache/geode/cache/server/internal/LoadMonitor.java b/geode-core/src/main/java/org/apache/geode/cache/server/internal/LoadMonitor.java
index a30a21e..973c341 100644
--- a/geode-core/src/main/java/org/apache/geode/cache/server/internal/LoadMonitor.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/server/internal/LoadMonitor.java
@@ -34,7 +34,7 @@ import org.apache.geode.internal.cache.tier.sockets.ConnectionListener;
 import org.apache.geode.internal.logging.LogService;
 
 /**
- * A class which monitors the load on a bridge server and periodically sends updates to the locator.
+ * A class which monitors the load on a cache server and periodically sends updates to the locator.
  *
  * @since GemFire 5.7
  *
@@ -192,7 +192,7 @@ public class LoadMonitor implements ConnectionListener {
             Set locators = advisor.adviseControllers();
 
             if (logger.isDebugEnabled()) {
-              logger.debug("Bridge Server Load Monitor Transmitting load {} to locators {}", load,
+              logger.debug("cache server Load Monitor Transmitting load {} to locators {}", load,
                   locators);
             }
 
@@ -212,7 +212,7 @@ public class LoadMonitor implements ConnectionListener {
           } else {
             if (logger.isDebugEnabled()) {
               logger.debug(
-                  "Bridge Server Load Monitor Load {} hasn't changed, not transmitting. skippedLoadUpdates={}",
+                  "cache server Load Monitor Load {} hasn't changed, not transmitting. skippedLoadUpdates={}",
                   load, skippedLoadUpdates);
             }
           }
diff --git a/geode-core/src/main/java/org/apache/geode/cache/server/internal/ServerMetricsImpl.java b/geode-core/src/main/java/org/apache/geode/cache/server/internal/ServerMetricsImpl.java
index 6baa71c..0f8210c 100644
--- a/geode-core/src/main/java/org/apache/geode/cache/server/internal/ServerMetricsImpl.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/server/internal/ServerMetricsImpl.java
@@ -19,7 +19,7 @@ import java.util.concurrent.atomic.AtomicInteger;
 import org.apache.geode.cache.server.ServerMetrics;
 
 /**
- * Metrics describing the load on a bridge server.
+ * Metrics describing the load on a cache server.
  *
  * @since GemFire 5.7
  *
diff --git a/geode-core/src/main/java/org/apache/geode/distributed/internal/AbstractDistributionConfig.java b/geode-core/src/main/java/org/apache/geode/distributed/internal/AbstractDistributionConfig.java
index aa5e5b6..da050b2 100644
--- a/geode-core/src/main/java/org/apache/geode/distributed/internal/AbstractDistributionConfig.java
+++ b/geode-core/src/main/java/org/apache/geode/distributed/internal/AbstractDistributionConfig.java
@@ -458,7 +458,7 @@ public abstract class AbstractDistributionConfig extends AbstractConfig
 
         } catch (UnknownHostException ex) {
           throw new IllegalArgumentException(
-              String.format("Unknown locator bind address:  %s",
+              String.format("Unknown locator bind address: %s",
                   bindAddr));
         }
 
@@ -474,7 +474,7 @@ public abstract class AbstractDistributionConfig extends AbstractConfig
       if (lastIndex == -1) {
         if (locator.indexOf('[') >= 0) {
           throw new IllegalArgumentException(
-              String.format("Invalid locator:  %s",
+              String.format("Invalid locator: %s",
                   value));
 
         } else {
@@ -497,7 +497,7 @@ public abstract class AbstractDistributionConfig extends AbstractConfig
         }
       } catch (NumberFormatException ex) {
         throw new IllegalArgumentException(
-            String.format("Invalid locator:  %s", value));
+            String.format("Invalid locator: %s", value));
       }
 
       locatorsb.append('[');
@@ -735,7 +735,7 @@ public abstract class AbstractDistributionConfig extends AbstractConfig
       // null is a "valid" value for any class
       if (!validValueClass.isInstance(attValue)) {
         throw new InvalidValueException(
-            String.format("%s  value %s must be of type  %s",
+            String.format("%s value %s must be of type %s",
                 attName, attValue, validValueClass.getName()));
       }
     }
diff --git a/geode-core/src/main/java/org/apache/geode/distributed/internal/ClusterDistributionManager.java b/geode-core/src/main/java/org/apache/geode/distributed/internal/ClusterDistributionManager.java
index 1b65172..6e2a978 100644
--- a/geode-core/src/main/java/org/apache/geode/distributed/internal/ClusterDistributionManager.java
+++ b/geode-core/src/main/java/org/apache/geode/distributed/internal/ClusterDistributionManager.java
@@ -1495,7 +1495,7 @@ public class ClusterDistributionManager implements DistributionManager {
         handleManagerStartup(member);
         break;
       default:
-        throw new InternalGemFireError(String.format("Unknown member type:  %s",
+        throw new InternalGemFireError(String.format("Unknown member type: %s",
             Integer.valueOf(vmType)));
     }
   }
@@ -2265,7 +2265,7 @@ public class ClusterDistributionManager implements DistributionManager {
         if (unresponsiveCount != 0) {
           if (Boolean.getBoolean("DistributionManager.requireAllStartupResponses")) {
             throw new SystemConnectException(
-                String.format("No startup replies from:  %s",
+                String.format("No startup replies from: %s",
                     unfinishedStartups));
           }
         }
@@ -2289,7 +2289,7 @@ public class ClusterDistributionManager implements DistributionManager {
           }
           throw new SystemConnectException(
               String.format(
-                  "Received no connection acknowledgments from any of the  %s  senior cache members:  %s",
+                  "Received no connection acknowledgments from any of the %s senior cache members: %s",
 
                   new Object[] {Integer.toString(allOthers.size()), sb.toString()}));
         } // and none responded
diff --git a/geode-core/src/main/java/org/apache/geode/distributed/internal/DistributionConfigImpl.java b/geode-core/src/main/java/org/apache/geode/distributed/internal/DistributionConfigImpl.java
index 26dffcf..df80af0 100644
--- a/geode-core/src/main/java/org/apache/geode/distributed/internal/DistributionConfigImpl.java
+++ b/geode-core/src/main/java/org/apache/geode/distributed/internal/DistributionConfigImpl.java
@@ -1596,7 +1596,7 @@ public class DistributionConfigImpl extends AbstractDistributionConfig implement
         properties.load(url.openStream());
       } catch (IOException io) {
         throw new GemFireIOException(
-            String.format("Failed reading  %s", url), io);
+            String.format("Failed reading %s", url), io);
       }
     }
   }
diff --git a/geode-core/src/main/java/org/apache/geode/distributed/internal/DistributionMessage.java b/geode-core/src/main/java/org/apache/geode/distributed/internal/DistributionMessage.java
index f99849e..ecc2902 100644
--- a/geode-core/src/main/java/org/apache/geode/distributed/internal/DistributionMessage.java
+++ b/geode-core/src/main/java/org/apache/geode/distributed/internal/DistributionMessage.java
@@ -384,7 +384,7 @@ public abstract class DistributionMessage implements DataSerializableFixedID, Cl
       // error condition, so you also need to check to see if the JVM
       // is still usable:
       SystemFailure.checkFailure();
-      logger.fatal(String.format("Uncaught exception processing  %s", this), t);
+      logger.fatal(String.format("Uncaught exception processing %s", this), t);
     } finally {
       if (doDecMessagesBeingReceived) {
         dm.getStats().decMessagesBeingReceived(this.bytesRead);
@@ -439,7 +439,7 @@ public abstract class DistributionMessage implements DataSerializableFixedID, Cl
         });
       } catch (RejectedExecutionException ex) {
         if (!dm.shutdownInProgress()) { // fix for bug 32395
-          logger.warn(String.format("%s  schedule() rejected", this.toString()), ex);
+          logger.warn(String.format("%s schedule() rejected", this.toString()), ex);
         }
       } catch (VirtualMachineError err) {
         SystemFailure.initiateFailure(err);
@@ -453,7 +453,7 @@ public abstract class DistributionMessage implements DataSerializableFixedID, Cl
         // error condition, so you also need to check to see if the JVM
         // is still usable:
         SystemFailure.checkFailure();
-        logger.fatal(String.format("Uncaught exception processing  %s", this), t);
+        logger.fatal(String.format("Uncaught exception processing %s", this), t);
         // I don't believe this ever happens (DJP May 2007)
         throw new InternalGemFireException(
             "Unexpected error scheduling message",
diff --git a/geode-core/src/main/java/org/apache/geode/distributed/internal/InternalDistributedSystem.java b/geode-core/src/main/java/org/apache/geode/distributed/internal/InternalDistributedSystem.java
index f940d32..d1a0ca2 100644
--- a/geode-core/src/main/java/org/apache/geode/distributed/internal/InternalDistributedSystem.java
+++ b/geode-core/src/main/java/org/apache/geode/distributed/internal/InternalDistributedSystem.java
@@ -702,7 +702,7 @@ public class InternalDistributedSystem extends DistributedSystem
             "This thread is initializing a new DistributedSystem in order to reconnect to other members");
       }
       // Note we need loners to load the license in case they are a
-      // bridge server and will need to enforce the member limit
+      // cache server and will need to enforce the member limit
       if (Boolean.getBoolean(InternalLocator.FORCE_LOCATOR_DM_TYPE)) {
         this.locatorDMTypeForced = true;
       }
@@ -2222,7 +2222,7 @@ public class InternalDistributedSystem extends DistributedSystem
         if (reason != null) {
           this.listeners.remove(listener); // don't leave in the list!
           throw new DistributedSystemDisconnectedException(
-              String.format("No listeners permitted after shutdown:  %s",
+              String.format("No listeners permitted after shutdown: %s",
                   reason),
               dm.getRootCause());
         }
@@ -2876,7 +2876,7 @@ public class InternalDistributedSystem extends DistributedSystem
         }
       } catch (IOException ex) {
         throw new GemFireIOException(
-            String.format("While starting cache server  %s", server),
+            String.format("While starting cache server %s", server),
             ex);
       }
     }
diff --git a/geode-core/src/main/java/org/apache/geode/distributed/internal/ServerLocation.java b/geode-core/src/main/java/org/apache/geode/distributed/internal/ServerLocation.java
index 6526bb5..f11b6a5 100644
--- a/geode-core/src/main/java/org/apache/geode/distributed/internal/ServerLocation.java
+++ b/geode-core/src/main/java/org/apache/geode/distributed/internal/ServerLocation.java
@@ -25,7 +25,7 @@ import org.apache.geode.DataSerializer;
 import org.apache.geode.internal.net.SocketCreator;
 
 /**
- * Represents the location of a bridge server. This class is preferable to InetSocketAddress because
+ * Represents the location of a cache server. This class is preferable to InetSocketAddress because
  * it doesn't do any name resolution.
  *
  *
diff --git a/geode-core/src/main/java/org/apache/geode/distributed/internal/locks/DLockGrantor.java b/geode-core/src/main/java/org/apache/geode/distributed/internal/locks/DLockGrantor.java
index e2e1ab2..03bec59 100644
--- a/geode-core/src/main/java/org/apache/geode/distributed/internal/locks/DLockGrantor.java
+++ b/geode-core/src/main/java/org/apache/geode/distributed/internal/locks/DLockGrantor.java
@@ -1753,7 +1753,7 @@ public class DLockGrantor {
         break;
     }
     if (stateDesc == null) {
-      throw new IllegalArgumentException(String.format("Unknown state for grantor:  %s",
+      throw new IllegalArgumentException(String.format("Unknown state for grantor: %s",
           Integer.valueOf(state)));
     }
     return stateDesc;
@@ -1768,7 +1768,7 @@ public class DLockGrantor {
     if (this.state != INITIALIZING) {
       String stateDesc = stateToString(this.state);
       throw new IllegalStateException(
-          String.format("DLockGrantor operation only allowed when initializing, not  %s",
+          String.format("DLockGrantor operation only allowed when initializing, not %s",
               stateDesc));
     }
   }
diff --git a/geode-core/src/main/java/org/apache/geode/distributed/internal/locks/DLockService.java b/geode-core/src/main/java/org/apache/geode/distributed/internal/locks/DLockService.java
index 40e9276..757662a 100644
--- a/geode-core/src/main/java/org/apache/geode/distributed/internal/locks/DLockService.java
+++ b/geode-core/src/main/java/org/apache/geode/distributed/internal/locks/DLockService.java
@@ -209,7 +209,7 @@ public class DLockService extends DistributedLockService {
    * @return the detail message that explains LockServiceDestroyedException
    */
   protected String generateLockServiceDestroyedMessage() {
-    return String.format("%s  has been destroyed", this);
+    return String.format("%s has been destroyed", this);
   }
 
   /**
@@ -1428,7 +1428,7 @@ public class DLockService extends DistributedLockService {
               reentrant = true;
               if (reentrant && disallowReentrant) {
                 throw new IllegalStateException(
-                    String.format("%s  attempted to reenter non-reentrant lock %s",
+                    String.format("%s attempted to reenter non-reentrant lock %s",
                         new Object[] {Thread.currentThread(), token}));
               }
               recursionBefore = token.getRecursion();
@@ -1750,7 +1750,7 @@ public class DLockService extends DistributedLockService {
           }
           throw new LockNotHeldException(
               String.format(
-                  "Attempting to unlock  %s  :  %s , but this thread does not own the lock.",
+                  "Attempting to unlock %s : %s , but this thread does not own the lock.",
                   new Object[] {this, name}));
         }
 
@@ -1765,7 +1765,7 @@ public class DLockService extends DistributedLockService {
             }
             throw new LockNotHeldException(
                 String.format(
-                    "Attempting to unlock  %s  :  %s , but this thread does not own the lock.  %s",
+                    "Attempting to unlock %s : %s , but this thread does not own the lock. %s",
                     new Object[] {this, name, token}));
           }
           // if recursion > 0 then token will still be locked after calling release
diff --git a/geode-core/src/main/java/org/apache/geode/distributed/internal/membership/gms/messenger/JGroupsMessenger.java b/geode-core/src/main/java/org/apache/geode/distributed/internal/membership/gms/messenger/JGroupsMessenger.java
index b631f9c..736e9e7 100644
--- a/geode-core/src/main/java/org/apache/geode/distributed/internal/membership/gms/messenger/JGroupsMessenger.java
+++ b/geode-core/src/main/java/org/apache/geode/distributed/internal/membership/gms/messenger/JGroupsMessenger.java
@@ -196,7 +196,7 @@ public class JGroupsMessenger implements Messenger {
     is = ClassPathLoader.getLatest().getResourceAsStream(getClass(), r);
     if (is == null) {
       throw new GemFireConfigException(
-          String.format("Cannot find  %s", r));
+          String.format("Cannot find %s", r));
     }
 
     String properties;
diff --git a/geode-core/src/main/java/org/apache/geode/distributed/internal/membership/gms/mgr/GMSMembershipManager.java b/geode-core/src/main/java/org/apache/geode/distributed/internal/membership/gms/mgr/GMSMembershipManager.java
index 63a6057..9fc2cee 100644
--- a/geode-core/src/main/java/org/apache/geode/distributed/internal/membership/gms/mgr/GMSMembershipManager.java
+++ b/geode-core/src/main/java/org/apache/geode/distributed/internal/membership/gms/mgr/GMSMembershipManager.java
@@ -528,7 +528,7 @@ public class GMSMembershipManager implements MembershipManager, Manager {
           // error condition, so you also need to check to see if the JVM
           // is still usable:
           SystemFailure.checkFailure();
-          logger.info(String.format("Membership: Fault while processing view addition of  %s",
+          logger.info(String.format("Membership: Fault while processing view addition of %s",
               m),
               t);
         }
@@ -561,7 +561,7 @@ public class GMSMembershipManager implements MembershipManager, Manager {
           // error condition, so you also need to check to see if the JVM
           // is still usable:
           SystemFailure.checkFailure();
-          logger.info(String.format("Membership: Fault while processing view removal of  %s",
+          logger.info(String.format("Membership: Fault while processing view removal of %s",
               m),
               t);
         }
@@ -1221,7 +1221,7 @@ public class GMSMembershipManager implements MembershipManager, Manager {
 
     else // sanity
       throw new InternalGemFireError(
-          String.format("unknown startup event:  %s", o));
+          String.format("unknown startup event: %s", o));
   }
 
   /**
diff --git a/geode-core/src/main/java/org/apache/geode/internal/AbstractConfig.java b/geode-core/src/main/java/org/apache/geode/internal/AbstractConfig.java
index 65b8c9e..8dc718b 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/AbstractConfig.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/AbstractConfig.java
@@ -246,14 +246,14 @@ public abstract class AbstractConfig implements Config {
           attObjectValue = InetAddress.getByName(value);
         } catch (UnknownHostException ex) {
           throw new IllegalArgumentException(
-              String.format("%s  value %s must be a valid host name.  %s",
+              String.format("%s value %s must be a valid host name. %s",
                   name, value, ex.toString()));
         }
       } else if (valueType.equals(FlowControlParams.class)) {
         String[] values = value.split(",");
         if (values.length != 3) {
           throw new IllegalArgumentException(
-              String.format("%s  value %s must have three elements separated by commas",
+              String.format("%s value %s must have three elements separated by commas",
                   name, value));
         }
         int allowance;
@@ -265,7 +265,7 @@ public abstract class AbstractConfig implements Config {
           waitTime = Integer.parseInt(values[2].trim());
         } catch (NumberFormatException e) {
           throw new IllegalArgumentException(
-              String.format("%s  value %s must be composed of an integer, a float, and an integer",
+              String.format("%s value %s must be composed of an integer, a float, and an integer",
                   name, value));
         }
         attObjectValue = new FlowControlParams(allowance, threshold, waitTime);
@@ -274,7 +274,7 @@ public abstract class AbstractConfig implements Config {
         attObjectValue = commaDelimitedStringToSecurableCommunicationChannels(value);
       } else {
         throw new InternalGemFireException(
-            String.format("unhandled attribute type  %s  for %s.",
+            String.format("unhandled attribute type %s for %s.",
                 valueType, name));
       }
     } catch (NumberFormatException ex) {
@@ -323,7 +323,7 @@ public abstract class AbstractConfig implements Config {
     String[] validAttNames = getAttributeNames();
     if (!Arrays.asList(validAttNames).contains(name.toLowerCase())) {
       throw new IllegalArgumentException(
-          String.format("Unknown configuration attribute name %s. Valid attribute names are:  %s .",
+          String.format("Unknown configuration attribute name %s. Valid attribute names are: %s.",
               name, SystemAdmin.join(validAttNames)));
     }
   }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/AvailablePort.java b/geode-core/src/main/java/org/apache/geode/internal/AvailablePort.java
index 34126bf..cab9b15 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/AvailablePort.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/AvailablePort.java
@@ -153,7 +153,7 @@ public class AvailablePort {
     }
 
     else {
-      throw new IllegalArgumentException(String.format("Unknown protocol:  %s",
+      throw new IllegalArgumentException(String.format("Unknown protocol: %s",
           Integer.valueOf(protocol)));
     }
   }
@@ -169,7 +169,7 @@ public class AvailablePort {
     } else if (protocol == MULTICAST) {
       throw new IllegalArgumentException("You can not keep the JGROUPS protocol");
     } else {
-      throw new IllegalArgumentException(String.format("Unknown protocol:  %s",
+      throw new IllegalArgumentException(String.format("Unknown protocol: %s",
           Integer.valueOf(protocol)));
     }
   }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/HeapDataOutputStream.java b/geode-core/src/main/java/org/apache/geode/internal/HeapDataOutputStream.java
index b43996b..d07e7ef 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/HeapDataOutputStream.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/HeapDataOutputStream.java
@@ -1264,7 +1264,7 @@ public class HeapDataOutputStream extends OutputStream
     } catch (UTFDataFormatException ex) {
       // this shouldn't happen since we did not encode the length
       throw new IllegalStateException(
-          String.format("unexpected  %s", ex));
+          String.format("unexpected %s", ex));
     }
   }
 
diff --git a/geode-core/src/main/java/org/apache/geode/internal/InternalDataSerializer.java b/geode-core/src/main/java/org/apache/geode/internal/InternalDataSerializer.java
index 2d66f11..3fba509 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/InternalDataSerializer.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/InternalDataSerializer.java
@@ -846,7 +846,7 @@ public abstract class InternalDataSerializer extends DataSerializer {
   static DataSerializer newInstance(Class c) {
     if (!DataSerializer.class.isAssignableFrom(c)) {
       throw new IllegalArgumentException(
-          String.format("%s  does not extend DataSerializer.",
+          String.format("%s does not extend DataSerializer.",
               c.getName()));
     }
 
@@ -872,18 +872,18 @@ public abstract class InternalDataSerializer extends DataSerializer {
 
     } catch (IllegalAccessException ignored) {
       throw new IllegalArgumentException(
-          String.format("Could not instantiate an instance of  %s",
+          String.format("Could not instantiate an instance of %s",
               c.getName()));
 
     } catch (InstantiationException ex) {
       throw new IllegalArgumentException(
-          String.format("Could not instantiate an instance of  %s",
+          String.format("Could not instantiate an instance of %s",
               c.getName()),
           ex);
 
     } catch (InvocationTargetException ex) {
       throw new IllegalArgumentException(
-          String.format("While instantiating an instance of  %s",
+          String.format("While instantiating an instance of %s",
               c.getName()),
           ex);
     }
@@ -1023,7 +1023,7 @@ public abstract class InternalDataSerializer extends DataSerializer {
       sendRegistrationMessageToServers(s);
     }
     // send it to all cache clients irrelevant of distribute
-    // bridge servers send it all the clients irrelevant of
+    // cache servers send it to all the clients irrelevant of
     // originator VM
     sendRegistrationMessageToClients(s);
 
@@ -1618,7 +1618,7 @@ public abstract class InternalDataSerializer extends DataSerializer {
       } else {
         throw new ToDataException(
             String.format(
-                "Serializer  %s  (a  %s ) said that it could serialize an instance of  %s , but its toData() method returned false.",
+                "Serializer %s (a %s) said that it could serialize an instance of %s, but its toData() method returned false.",
                 serializer.getId(), serializer.getClass().getName(),
                 o.getClass().getName()));
       }
@@ -2013,7 +2013,7 @@ public abstract class InternalDataSerializer extends DataSerializer {
         break;
       default:
         throw new IOException(
-            String.format("Unknown TimeUnit type:  %s", type));
+            String.format("Unknown TimeUnit type: %s", type));
     }
 
     if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
@@ -2213,7 +2213,7 @@ public abstract class InternalDataSerializer extends DataSerializer {
        */
       if (disallowJavaSerialization() && o instanceof Serializable) {
         throw new NotSerializableException(
-            String.format("%s  is not DataSerializable and Java Serialization is disallowed",
+            String.format("%s is not DataSerializable and Java Serialization is disallowed",
                 o.getClass().getName()));
       }
 
@@ -2483,7 +2483,7 @@ public abstract class InternalDataSerializer extends DataSerializer {
       throw ex;
     } catch (Exception ex) {
       throw new SerializationException(
-          String.format("Could not create an instance of  %s .",
+          String.format("Could not create an instance of %s.",
               ds.getClass().getName()),
           ex);
     }
@@ -2505,7 +2505,7 @@ public abstract class InternalDataSerializer extends DataSerializer {
       throw ex;
     } catch (Exception ex) {
       throw new SerializationException(
-          String.format("Could not create an instance of  %s .",
+          String.format("Could not create an instance of %s.",
               c.getName()),
           ex);
     }
@@ -2950,7 +2950,7 @@ public abstract class InternalDataSerializer extends DataSerializer {
           "No Instantiator has been registered for class with id {}",
           classId);
       throw new IOException(
-          String.format("No Instantiator has been registered for class with id  %s",
+          String.format("No Instantiator has been registered for class with id %s",
               classId));
 
     } else {
@@ -2967,7 +2967,7 @@ public abstract class InternalDataSerializer extends DataSerializer {
 
       } catch (Exception ex) {
         throw new SerializationException(
-            String.format("Could not deserialize an instance of  %s",
+            String.format("Could not deserialize an instance of %s",
                 instantiator.getInstantiatedClass().getName()),
             ex);
       }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/InternalInstantiator.java b/geode-core/src/main/java/org/apache/geode/internal/InternalInstantiator.java
index 74c84d6..4448752 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/InternalInstantiator.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/InternalInstantiator.java
@@ -119,7 +119,7 @@ public class InternalInstantiator {
       if (oldId != 0 && oldId != classId) {
         throw new IllegalStateException(
             String.format(
-                "Class  %s  is already registered with id %s so it can not be registered with id %s",
+                "Class %s is already registered with id %s so it can not be registered with id %s",
 
                 new Object[] {c.getName(), Integer.valueOf(oldId), Integer.valueOf(classId)}));
       }
@@ -177,7 +177,7 @@ public class InternalInstantiator {
       sendRegistrationMessageToServers(instantiator);
     }
     // send it to all cache clients irrelevant of distribute
-    // bridge servers send it all the clients irrelevant of
+    // cache servers send it to all the clients irrelevant of
     // originator VM
     sendRegistrationMessageToClients(instantiator);
 
@@ -346,7 +346,7 @@ public class InternalInstantiator {
         if (iah != null && iah.getId() != holder.getId()) {
           throw new IllegalStateException(
               String.format(
-                  "Class  %s  is already registered with id %s so it can not be registered with id %s",
+                  "Class %s is already registered with id %s so it can not be registered with id %s",
 
                   new Object[] {instantiatorClassName, iah.getId(), holder.getId()}));
         }
@@ -465,7 +465,7 @@ public class InternalInstantiator {
     final Instantiator i = (Instantiator) idsToInstantiators.remove(idx);
     if (i == null) {
       throw new IllegalArgumentException(
-          String.format("Class  %s  was not registered with id  %s",
+          String.format("Class %s was not registered with id %s",
               new Object[] {c.getName(), Integer.valueOf(classId)}));
     } else {
       dsMap.remove(c.getName(), i);
@@ -592,7 +592,7 @@ public class InternalInstantiator {
       int id) {
     if (!Instantiator.class.isAssignableFrom(instantiatorClass)) {
       throw new IllegalArgumentException(
-          String.format("%s  does not extend Instantiator.",
+          String.format("%s does not extend Instantiator.",
               instantiatorClass.getName()));
     }
 
@@ -631,19 +631,19 @@ public class InternalInstantiator {
 
     } catch (IllegalAccessException ex) {
       throw new IllegalArgumentException(
-          String.format("Could not access zero-argument constructor of  %s",
+          String.format("Could not access zero-argument constructor of %s",
               instantiatorClass.getName()));
 
     } catch (InstantiationException ex) {
       RuntimeException ex2 = new IllegalArgumentException(
-          String.format("Could not instantiate an instance of  %s",
+          String.format("Could not instantiate an instance of %s",
               instantiatorClass.getName()));
       ex2.initCause(ex);
       throw ex2;
 
     } catch (InvocationTargetException ex) {
       RuntimeException ex2 = new IllegalArgumentException(
-          String.format("While instantiating an instance of  %s",
+          String.format("While instantiating an instance of %s",
               instantiatorClass.getName()));
       ex2.initCause(ex);
       throw ex2;
diff --git a/geode-core/src/main/java/org/apache/geode/internal/ManagerInfo.java b/geode-core/src/main/java/org/apache/geode/internal/ManagerInfo.java
index f5ca619..08923f7 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/ManagerInfo.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/ManagerInfo.java
@@ -182,7 +182,7 @@ public class ManagerInfo implements DataSerializable {
       }
     }
     throw new IllegalArgumentException(
-        String.format("Unknown statusName  %s", statusName));
+        String.format("Unknown statusName %s", statusName));
   }
 
   public static ManagerInfo loadLocatorInfo(File directory) {
diff --git a/geode-core/src/main/java/org/apache/geode/internal/ObjIdMap.java b/geode-core/src/main/java/org/apache/geode/internal/ObjIdMap.java
index 0fb86bb..d1c5ca5 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/ObjIdMap.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/ObjIdMap.java
@@ -48,13 +48,13 @@ public class ObjIdMap {
    */
   public ObjIdMap(int initialCapacity, float loadFactor) {
     if (initialCapacity < 0) {
-      throw new IllegalArgumentException(String.format("Illegal Initial Capacity:  %s",
+      throw new IllegalArgumentException(String.format("Illegal Initial Capacity: %s",
           Integer.valueOf(initialCapacity)));
     }
 
     if (loadFactor <= 0 || Float.isNaN(loadFactor)) {
       throw new IllegalArgumentException(
-          String.format("Illegal Load factor:  %s", new Float(loadFactor)));
+          String.format("Illegal Load factor: %s", new Float(loadFactor)));
     }
 
     if (initialCapacity == 0) {
diff --git a/geode-core/src/main/java/org/apache/geode/internal/SystemAdmin.java b/geode-core/src/main/java/org/apache/geode/internal/SystemAdmin.java
index 446bca4..47ab289 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/SystemAdmin.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/SystemAdmin.java
@@ -273,7 +273,7 @@ public class SystemAdmin {
         addr = InetAddress.getByName(addressOption);
       } catch (UnknownHostException ex) {
         throw new IllegalArgumentException(
-            String.format("-address value was not a known IP address:  %s",
+            String.format("-address value was not a known IP address: %s",
                 ex));
       }
     }
@@ -795,7 +795,7 @@ public class SystemAdmin {
         ps = new PrintStream(new FileOutputStream(outOption));
       } catch (FileNotFoundException ex) {
         throw new GemFireIOException(
-            String.format("Could not create file %s for output because  %s",
+            String.format("Could not create file %s for output because %s",
                 outOption, getExceptionMessage(ex)));
       }
     } else {
@@ -819,7 +819,7 @@ public class SystemAdmin {
         inputs.put(shortName, new FileInputStream(fileName));
       } catch (FileNotFoundException ex) {
         throw new GemFireIOException(
-            String.format("Could not open to %s for reading because  %s",
+            String.format("Could not open %s for reading because %s",
                 fileName, getExceptionMessage(ex)));
       }
       if (!quiet) {
@@ -1454,7 +1454,7 @@ public class SystemAdmin {
           cmd = "validate-disk-store";
         } else {
           throw new InternalGemFireException(
-              String.format("Unhandled alias  %s", cmd));
+              String.format("Unhandled alias %s", cmd));
         }
       }
     }
@@ -1791,7 +1791,7 @@ public class SystemAdmin {
       return d.getTime();
     } catch (ParseException ex) {
       throw new IllegalArgumentException(
-          String.format("Time was not in this format %s.  %s",
+          String.format("Time was not in this format %s. %s",
               new Object[] {DateFormatter.FORMAT_STRING, ex}));
     }
   }
@@ -1857,7 +1857,7 @@ public class SystemAdmin {
             outputDir = argValue;
           } else {
             throw new InternalGemFireException(
-                String.format("unexpected valid option  %s",
+                String.format("unexpected valid option %s",
                     validArgs[i]));
           }
           return true;
@@ -1888,7 +1888,7 @@ public class SystemAdmin {
         } else if (validArgs[i].equals("-all-threads")) {
           printStacksOption = arg;
         } else {
-          throw new InternalGemFireException(String.format("unexpected valid option  %s",
+          throw new InternalGemFireException(String.format("unexpected valid option %s",
               validArgs[i]));
         }
         return true;
diff --git a/geode-core/src/main/java/org/apache/geode/internal/UniqueIdGenerator.java b/geode-core/src/main/java/org/apache/geode/internal/UniqueIdGenerator.java
index ff7be33..9cbd9c8 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/UniqueIdGenerator.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/UniqueIdGenerator.java
@@ -236,10 +236,10 @@ public class UniqueIdGenerator {
   public void release(int id) {
     if (id < 0) {
       throw new IllegalArgumentException(
-          String.format("negative id:  %s", Integer.valueOf(id)));
+          String.format("negative id: %s", Integer.valueOf(id)));
     } else if (id > this.MAX_ID) {
       throw new IllegalArgumentException(
-          String.format("id > MAX_ID:  %s", Integer.valueOf(id)));
+          String.format("id > MAX_ID: %s", Integer.valueOf(id)));
     }
     synchronized (this) {
       clearBit(id);
diff --git a/geode-core/src/main/java/org/apache/geode/internal/admin/CacheInfo.java b/geode-core/src/main/java/org/apache/geode/internal/admin/CacheInfo.java
index e0ca4b0..cfa89a6 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/admin/CacheInfo.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/admin/CacheInfo.java
@@ -79,7 +79,7 @@ public interface CacheInfo {
   void setClosed();
 
   /**
-   * Returns the ids of all of the bridge servers that are associated with this cache.
+   * Returns the ids of all of the cache servers that are associated with this cache.
    *
    * @since GemFire 4.0
    */
diff --git a/geode-core/src/main/java/org/apache/geode/internal/admin/GemFireVM.java b/geode-core/src/main/java/org/apache/geode/internal/admin/GemFireVM.java
index 80c0337..dca91b3 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/admin/GemFireVM.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/admin/GemFireVM.java
@@ -300,21 +300,21 @@ public interface GemFireVM {
   CacheInfo setCacheSearchTimeout(CacheInfo c, int v) throws AdminException;
 
   /**
-   * Adds a bridge server a cache in this VM
+   * Adds a cache server to a cache in this VM
    *
    * @since GemFire 4.0
    */
   AdminBridgeServer addCacheServer(CacheInfo cache) throws AdminException;
 
   /**
-   * Returns information about a bridge server that runs in this VM
+   * Returns information about a cache server that runs in this VM
    *
-   * @param id The unique {@link AdminBridgeServer#getId id} of the bridge server
+   * @param id The unique {@link AdminBridgeServer#getId id} of the cache server
    */
   AdminBridgeServer getBridgeInfo(CacheInfo cache, int id) throws AdminException;
 
   /**
-   * Starts a bridge server in this VM
+   * Starts a cache server in this VM
    *
    * @since GemFire 4.0
    */
@@ -322,7 +322,7 @@ public interface GemFireVM {
       throws AdminException;
 
   /**
-   * Stops a bridge server in this VM
+   * Stops a cache server in this VM
    *
    * @since GemFire 4.0
    */
diff --git a/geode-core/src/main/java/org/apache/geode/internal/admin/ListenerIdMap.java b/geode-core/src/main/java/org/apache/geode/internal/admin/ListenerIdMap.java
index 6e53aaa..8b208af 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/admin/ListenerIdMap.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/admin/ListenerIdMap.java
@@ -50,7 +50,7 @@ public class ListenerIdMap {
     }
 
     if (loadFactor <= 0 || Float.isNaN(loadFactor)) {
-      throw new IllegalArgumentException(String.format("Illegal Load factor:  %s",
+      throw new IllegalArgumentException(String.format("Illegal Load factor: %s",
           new Float(loadFactor)));
     }
 
diff --git a/geode-core/src/main/java/org/apache/geode/internal/admin/remote/BridgeServerRequest.java b/geode-core/src/main/java/org/apache/geode/internal/admin/remote/BridgeServerRequest.java
index 5ab332f..da469dc 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/admin/remote/BridgeServerRequest.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/admin/remote/BridgeServerRequest.java
@@ -24,55 +24,55 @@ import org.apache.geode.internal.admin.CacheInfo;
 
 /**
  * A message that is sent to a VM that hosts a cache to perform an administrative operation on one
- * of its bridge servers.
+ * of its cache servers.
  *
  * @since GemFire 4.0
  */
 public class BridgeServerRequest extends AdminRequest {
 
-  /** Add a new bridge server */
+  /** Add a new cache server */
   static final int ADD_OPERATION = 10;
 
-  /** Get info about a bridge server */
+  /** Get info about a cache server */
   static final int INFO_OPERATION = 11;
 
-  /** Start a bridge server */
+  /** Start a cache server */
   static final int START_OPERATION = 12;
 
-  /** Stop a bridge server */
+  /** Stop a cache server */
   static final int STOP_OPERATION = 13;
 
   /////////////////// Instance Fields ////////////////////
 
-  /** The id of the cache in which the bridge server resides */
+  /** The id of the cache in which the cache server resides */
   private int cacheId;
 
   /** The type of operation to perform */
   private int operation;
 
-  /** Bridge server configuration info for performing an operation */
+  /** Cache server configuration info for performing an operation */
   private RemoteBridgeServer bridgeInfo;
 
-  /** The id of bridge server to get information about */
+  /** The id of the cache server to get information about */
   private int bridgeId;
 
   //////////////////// Static Methods ////////////////////
 
   /**
-   * Creates a <code>BridgeServerRequest</code> for adding a new bridge server.
+   * Creates a <code>BridgeServerRequest</code> for adding a new cache server.
    */
   public static BridgeServerRequest createForAdd(CacheInfo cache) {
     BridgeServerRequest request = new BridgeServerRequest();
     request.cacheId = cache.getId();
     request.operation = ADD_OPERATION;
     request.friendlyName =
-        "Add bridge server";
+        "Add cache server";
     request.bridgeInfo = null;
     return request;
   }
 
   /**
-   * Creates a <code>BridgeServerRequest</code> for adding a new bridge server.
+   * Creates a <code>BridgeServerRequest</code> for getting information about a cache server.
    */
   public static BridgeServerRequest createForInfo(CacheInfo cache, int id) {
     BridgeServerRequest request = new BridgeServerRequest();
@@ -86,7 +86,7 @@ public class BridgeServerRequest extends AdminRequest {
   }
 
   /**
-   * Creates a <code>BridgeServerRequest</code> for starting a bridge server.
+   * Creates a <code>BridgeServerRequest</code> for starting a cache server.
    */
   public static BridgeServerRequest createForStart(CacheInfo cache, RemoteBridgeServer bridge) {
     BridgeServerRequest request = new BridgeServerRequest();
@@ -99,7 +99,7 @@ public class BridgeServerRequest extends AdminRequest {
   }
 
   /**
-   * Creates a <code>BridgeServerRequest</code> for stopping a bridge server.
+   * Creates a <code>BridgeServerRequest</code> for stopping a cache server.
    */
   public static BridgeServerRequest createForStop(CacheInfo cache, RemoteBridgeServer bridge) {
     BridgeServerRequest request = new BridgeServerRequest();
@@ -117,7 +117,7 @@ public class BridgeServerRequest extends AdminRequest {
   private static String getOperationDescription(int op) {
     switch (op) {
       case ADD_OPERATION:
-        return "Add bridge server";
+        return "Add cache server";
       case INFO_OPERATION:
         return "Get info about cache server";
       default:
@@ -137,7 +137,7 @@ public class BridgeServerRequest extends AdminRequest {
   }
 
   /**
-   * Returns the id of the cache in which the bridge server resides
+   * Returns the id of the cache in which the cache server resides
    */
   int getCacheId() {
     return this.cacheId;
@@ -151,7 +151,7 @@ public class BridgeServerRequest extends AdminRequest {
   }
 
   /**
-   * Returns the id of the bridge server for which information is requested.
+   * Returns the id of the cache server for which information is requested.
    */
   int getBridgeId() {
     return this.bridgeId;
diff --git a/geode-core/src/main/java/org/apache/geode/internal/admin/remote/BridgeServerResponse.java b/geode-core/src/main/java/org/apache/geode/internal/admin/remote/BridgeServerResponse.java
index ddc102d..5aaeb9a 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/admin/remote/BridgeServerResponse.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/admin/remote/BridgeServerResponse.java
@@ -29,13 +29,13 @@ import org.apache.geode.internal.cache.InternalCache;
 
 /**
  * A message that is sent in response to a {@link BridgeServerResponse}. It perform an operation on
- * a bridge server and returns the result to the sender.
+ * a cache server and returns the result to the sender.
  *
  * @since GemFire 4.0
  */
 public class BridgeServerResponse extends AdminResponse {
 
-  /** Information about the bridge server that was operated on */
+  /** Information about the cache server that was operated on */
   private RemoteBridgeServer bridgeInfo;
 
   /** An exception thrown while performing the operation */
@@ -114,7 +114,7 @@ public class BridgeServerResponse extends AdminResponse {
           }
 
           default:
-            Assert.assertTrue(false, "Unknown bridge server operation: " + operation);
+            Assert.assertTrue(false, "Unknown cache server operation: " + operation);
         }
       }
 
diff --git a/geode-core/src/main/java/org/apache/geode/internal/admin/remote/DistributionLocatorId.java b/geode-core/src/main/java/org/apache/geode/internal/admin/remote/DistributionLocatorId.java
index 3ae38fe..245636a 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/admin/remote/DistributionLocatorId.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/admin/remote/DistributionLocatorId.java
@@ -136,7 +136,7 @@ public class DistributionLocatorId implements java.io.Serializable {
       this.port = Integer.parseInt(marshalled.substring(portStartIdx + 1, portEndIdx));
     } catch (NumberFormatException nfe) {
       throw new IllegalArgumentException(
-          String.format("%s  does not contain a valid port number",
+          String.format("%s does not contain a valid port number",
               marshalled));
     }
 
diff --git a/geode-core/src/main/java/org/apache/geode/internal/admin/remote/RegionAdminRequest.java b/geode-core/src/main/java/org/apache/geode/internal/admin/remote/RegionAdminRequest.java
index e1d73cc..5e140c6 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/admin/remote/RegionAdminRequest.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/admin/remote/RegionAdminRequest.java
@@ -51,7 +51,7 @@ public abstract class RegionAdminRequest extends AdminRequest {
     Region r = cache.getRegion(regionName);
     if (r == null) {
       throw new RegionNotFoundException(
-          String.format("Region  %s  not found in remote cache %s.",
+          String.format("Region %s not found in remote cache %s.",
               new Object[] {regionName, cache.getName()}));
     }
     return r;
diff --git a/geode-core/src/main/java/org/apache/geode/internal/admin/remote/RegionResponse.java b/geode-core/src/main/java/org/apache/geode/internal/admin/remote/RegionResponse.java
index 8025605..914dce2 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/admin/remote/RegionResponse.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/admin/remote/RegionResponse.java
@@ -80,7 +80,7 @@ public class RegionResponse extends AdminResponse {
 
           default:
             throw new InternalGemFireException(
-                String.format("Unknown RegionRequest operation:  %s",
+                String.format("Unknown RegionRequest operation: %s",
                     Integer.valueOf(action)));
         }
 
diff --git a/geode-core/src/main/java/org/apache/geode/internal/admin/remote/RemoteAlert.java b/geode-core/src/main/java/org/apache/geode/internal/admin/remote/RemoteAlert.java
index b4df191..31c1b66 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/admin/remote/RemoteAlert.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/admin/remote/RemoteAlert.java
@@ -135,7 +135,7 @@ public class RemoteAlert implements Alert {
 
     } catch (ParseException ex) {
       throw new IllegalArgumentException(
-          String.format("Invalidate timestamp:  %s", sb.toString()));
+          String.format("Invalid timestamp: %s", sb.toString()));
     }
 
     // Assume that the connection name is only one token...
diff --git a/geode-core/src/main/java/org/apache/geode/internal/admin/remote/RemoteBridgeServer.java b/geode-core/src/main/java/org/apache/geode/internal/admin/remote/RemoteBridgeServer.java
index 5c887c6..0b1f885 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/admin/remote/RemoteBridgeServer.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/admin/remote/RemoteBridgeServer.java
@@ -38,7 +38,7 @@ import org.apache.geode.internal.cache.tier.Acceptor;
 
 /**
  * A remote (serializable) implementation of <code>BridgeServer</code> that is passed between
- * administration VMs and VMs that host caches with bridge servers.
+ * administration VMs and VMs that host caches with cache servers.
  *
  * @since GemFire 4.0
  */
@@ -47,10 +47,10 @@ public class RemoteBridgeServer extends AbstractCacheServer
 
   private static final long serialVersionUID = 8417391824652384959L;
 
-  /** Is this bridge server running? */
+  /** Is this cache server running? */
   private boolean isRunning;
 
-  /** The id of this bridge server */
+  /** The id of this cache server */
   private int id;
 
 
@@ -133,7 +133,7 @@ public class RemoteBridgeServer extends AbstractCacheServer
   }
 
   /**
-   * Returns the cache that is served by this bridge server or <code>null</code> if this server is
+   * Returns the cache that is served by this cache server or <code>null</code> if this server is
    * not running.
    */
   @Override
diff --git a/geode-core/src/main/java/org/apache/geode/internal/admin/remote/RemoteCacheInfo.java b/geode-core/src/main/java/org/apache/geode/internal/admin/remote/RemoteCacheInfo.java
index 1d96b27..c84610c 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/admin/remote/RemoteCacheInfo.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/admin/remote/RemoteCacheInfo.java
@@ -49,7 +49,7 @@ public class RemoteCacheInfo implements CacheInfo, DataSerializable {
   private String[] rootRegionNames;
   private RemoteStatResource perfStats;
 
-  /** The ids of the bridge servers associated with this cache */
+  /** The ids of the cache servers associated with this cache */
   private int[] bridgeServerIds;
 
   /** Is this is a cache server? */
@@ -217,7 +217,7 @@ public class RemoteCacheInfo implements CacheInfo, DataSerializable {
 
   @Override
   public String toString() {
-    return String.format("Information about the cache %s with %s bridge servers",
+    return String.format("Information about the cache %s with %s cache servers",
         this.name, this.bridgeServerIds.length);
   }
 }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/admin/remote/RemoteGemFireVM.java b/geode-core/src/main/java/org/apache/geode/internal/admin/remote/RemoteGemFireVM.java
index f4d6361..ba61bbd 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/admin/remote/RemoteGemFireVM.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/admin/remote/RemoteGemFireVM.java
@@ -870,7 +870,7 @@ public abstract class RemoteGemFireVM implements GemFireVM {
   AdminResponse sendAndWait(AdminRequest msg) {
     if (unreachable) {
       throw new OperationCancelledException(
-          String.format("%s  is unreachable. It has either left or crashed.",
+          String.format("%s is unreachable. It has either left or crashed.",
               this.name));
     }
     if (this.id == null) {
diff --git a/geode-core/src/main/java/org/apache/geode/internal/admin/remote/RemoteGfManagerAgent.java b/geode-core/src/main/java/org/apache/geode/internal/admin/remote/RemoteGfManagerAgent.java
index 1693737..92a8657 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/admin/remote/RemoteGfManagerAgent.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/admin/remote/RemoteGfManagerAgent.java
@@ -245,7 +245,7 @@ class RemoteGfManagerAgent implements GfManagerAgent {
   public RemoteGfManagerAgent(GfManagerAgentConfig cfg) {
     if (!(cfg.getTransport() instanceof RemoteTransportConfig)) {
       throw new IllegalArgumentException(
-          String.format("Expected  %s  to be a RemoteTransportConfig",
+          String.format("Expected %s to be a RemoteTransportConfig",
               cfg.getTransport()));
     }
     this.transport = (RemoteTransportConfig) cfg.getTransport();
@@ -545,7 +545,7 @@ class RemoteGfManagerAgent implements GfManagerAgent {
     try {
       if (((Boolean) sending.get()).booleanValue()) {
         throw new OperationCancelledException(
-            String.format("Recursion detected while sending  %s",
+            String.format("Recursion detected while sending %s",
                 msg));
 
       } else {
@@ -560,7 +560,7 @@ class RemoteGfManagerAgent implements GfManagerAgent {
         // bug 39824: generate CancelException if we're shutting down
         dm.getCancelCriterion().checkCancelInProgress(null);
         throw new RuntimeAdminException(
-            String.format("%s  is not currently connected.",
+            String.format("%s is not currently connected.",
                 this));
       }
 
@@ -1071,7 +1071,7 @@ class RemoteGfManagerAgent implements GfManagerAgent {
         case ClusterDistributionManager.LONER_DM_TYPE:
           break; // should this ever happen? :-)
         default:
-          throw new IllegalArgumentException(String.format("Unknown VM Kind:  %s",
+          throw new IllegalArgumentException(String.format("Unknown VM Kind: %s",
               Integer.valueOf(id.getVmKind())));
       }
 
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/AbstractCacheServer.java b/geode-core/src/main/java/org/apache/geode/internal/cache/AbstractCacheServer.java
index ef5874d..5030771 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/AbstractCacheServer.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/AbstractCacheServer.java
@@ -43,10 +43,10 @@ public abstract class AbstractCacheServer implements InternalCacheServer {
   public static final String TEST_OVERRIDE_DEFAULT_PORT_PROPERTY =
       DistributionConfig.GEMFIRE_PREFIX + "test.CacheServer.OVERRIDE_DEFAULT_PORT";
 
-  /** The cache that is served by this bridge server */
+  /** The cache that is served by this cache server */
   protected final InternalCache cache;
 
-  /** The port that the bridge server was configured to run on */
+  /** The port that the cache server was configured to run on */
   protected int port;
 
   /** The maximum number of connections that the BridgeServer will accept */
@@ -55,7 +55,7 @@ public abstract class AbstractCacheServer implements InternalCacheServer {
   /** The maximum number of threads that the BridgeServer will create */
   protected int maxThreads;
 
-  /** Whether the bridge server notifies by subscription */
+  /** Whether the cache server notifies by subscription */
   protected boolean notifyBySubscription = true;
 
   /**
@@ -359,7 +359,7 @@ public abstract class AbstractCacheServer implements InternalCacheServer {
   }
 
   /**
-   * Returns whether or not this bridge server has the same configuration as another bridge server.
+   * Returns whether or not this cache server has the same configuration as another cache server.
    */
   public boolean sameAs(CacheServer other) {
     return getPort() == other.getPort() && eq(getBindAddress(), other.getBindAddress())
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/AbstractDiskRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/AbstractDiskRegion.java
index 1ac7e64..9a2abfb 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/AbstractDiskRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/AbstractDiskRegion.java
@@ -309,17 +309,17 @@ public abstract class AbstractDiskRegion implements DiskRegionView {
         this.compressor = compressorClass.newInstance();
       } catch (ClassNotFoundException e) {
         throw new IllegalArgumentException(
-            String.format("Unknown Compressor  %s  found in disk initialization file.",
+            String.format("Unknown Compressor %s found in disk initialization file.",
                 compressorClassName),
             e);
       } catch (InstantiationException e) {
         throw new IllegalArgumentException(
-            String.format("Unknown Compressor  %s  found in disk initialization file.",
+            String.format("Unknown Compressor %s found in disk initialization file.",
                 compressorClassName),
             e);
       } catch (IllegalAccessException e) {
         throw new IllegalArgumentException(
-            String.format("Unknown Compressor  %s  found in disk initialization file.",
+            String.format("Unknown Compressor %s found in disk initialization file.",
                 compressorClassName),
             e);
       }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/AbstractRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/AbstractRegion.java
index 7dbad98..bb99ce0 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/AbstractRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/AbstractRegion.java
@@ -1644,7 +1644,7 @@ public abstract class AbstractRegion implements InternalRegion, AttributesMutato
     }
     if (result.size() > 1)
       throw new FunctionDomainException(
-          String.format("selectValue expects results of size 1, but found results of size  %s",
+          String.format("selectValue expects results of size 1, but found results of size %s",
               result.size()));
     return result.iterator().next();
   }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/BucketAdvisor.java b/geode-core/src/main/java/org/apache/geode/internal/cache/BucketAdvisor.java
index 62b27a9..d0633f4 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/BucketAdvisor.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/BucketAdvisor.java
@@ -1941,7 +1941,7 @@ public class BucketAdvisor extends CacheDistributionAdvisor {
             this.primaryState = requestedState;
             break;
           default:
-            throw new IllegalStateException(String.format("Cannot change from  %s  to  %s",
+            throw new IllegalStateException(String.format("Cannot change from %s to %s",
                 new Object[] {this.primaryStateToString(),
                     this.primaryStateToString(requestedState)}));
         }
@@ -1976,7 +1976,7 @@ public class BucketAdvisor extends CacheDistributionAdvisor {
             this.primaryState = requestedState;
             break;
           default:
-            throw new IllegalStateException(String.format("Cannot change from  %s  to  %s",
+            throw new IllegalStateException(String.format("Cannot change from %s to %s",
                 new Object[] {this.primaryStateToString(),
                     this.primaryStateToString(requestedState)}));
         }
@@ -2002,7 +2002,7 @@ public class BucketAdvisor extends CacheDistributionAdvisor {
             this.primaryState = requestedState;
             break;
           default:
-            throw new IllegalStateException(String.format("Cannot change from  %s  to  %s",
+            throw new IllegalStateException(String.format("Cannot change from %s to %s",
                 new Object[] {this.primaryStateToString(),
                     this.primaryStateToString(requestedState)}));
         }
@@ -2041,7 +2041,7 @@ public class BucketAdvisor extends CacheDistributionAdvisor {
             this.primaryState = requestedState;
             break;
           default:
-            throw new IllegalStateException(String.format("Cannot change from  %s  to  %s",
+            throw new IllegalStateException(String.format("Cannot change from %s to %s",
                 new Object[] {this.primaryStateToString(),
                     this.primaryStateToString(requestedState)}));
         }
@@ -2092,7 +2092,7 @@ public class BucketAdvisor extends CacheDistributionAdvisor {
           }
             break;
           default:
-            throw new IllegalStateException(String.format("Cannot change from  %s  to  %s",
+            throw new IllegalStateException(String.format("Cannot change from %s to %s",
                 new Object[] {this.primaryStateToString(),
                     this.primaryStateToString(requestedState)}));
         }
@@ -2139,7 +2139,7 @@ public class BucketAdvisor extends CacheDistributionAdvisor {
           }
             break;
           default:
-            throw new IllegalStateException(String.format("Cannot change from  %s  to  %s",
+            throw new IllegalStateException(String.format("Cannot change from %s to %s",
                 new Object[] {this.primaryStateToString(),
                     this.primaryStateToString(requestedState)}));
         }
@@ -2198,7 +2198,7 @@ public class BucketAdvisor extends CacheDistributionAdvisor {
             return false;
           default:
             throw new IllegalStateException(
-                String.format("Cannot change from  %s  to  %s  for bucket  %s",
+                String.format("Cannot change from %s to %s for bucket %s",
                     new Object[] {this.primaryStateToString(),
                         this.primaryStateToString(requestedState), getAdvisee().getName()}));
         }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegion.java
index 0efa70b..62d4605 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegion.java
@@ -507,7 +507,7 @@ public class BucketRegion extends DistributedRegion implements Bucket {
   // Entry (Put/Create) rules
   // If this is a primary for the bucket
   // 1) apply op locally, aka update or create entry
-  // 2) distribute op to bucket secondaries and bridge servers with synchrony on local entry
+  // 2) distribute op to bucket secondaries and cache servers with synchrony on local entry
   // 3) cache listener with synchrony on entry
   // Else not a primary
   // 1) apply op locally
@@ -892,7 +892,7 @@ public class BucketRegion extends DistributedRegion implements Bucket {
   // Entry Invalidation rules
   // If this is a primary for the bucket
   // 1) apply op locally, aka update entry
-  // 2) distribute op to bucket secondaries and bridge servers with synchrony on local entry
+  // 2) distribute op to bucket secondaries and cache servers with synchrony on local entry
   // 3) cache listener with synchrony on entry
   // 4) update local bs, gateway
   // Else not a primary
@@ -1162,7 +1162,7 @@ public class BucketRegion extends DistributedRegion implements Bucket {
   // Entry Destruction rules
   // If this is a primary for the bucket
   // 1) apply op locally, aka destroy entry (REMOVED token)
-  // 2) distribute op to bucket secondaries and bridge servers with synchrony on local entry
+  // 2) distribute op to bucket secondaries and cache servers with synchrony on local entry
   // 3) cache listener with synchrony on local entry
   // 4) update local bs, gateway
   // Else not a primary
@@ -1392,7 +1392,7 @@ public class BucketRegion extends DistributedRegion implements Bucket {
 
   public boolean isPrimary() {
     throw new UnsupportedOperationException(
-        String.format("This should never be called on  %s",
+        String.format("This should never be called on %s",
             getClass()));
   }
 
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/CacheDistributionAdvisor.java b/geode-core/src/main/java/org/apache/geode/internal/cache/CacheDistributionAdvisor.java
index 154a03e..273e8a0 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/CacheDistributionAdvisor.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/CacheDistributionAdvisor.java
@@ -216,7 +216,7 @@ public class CacheDistributionAdvisor extends DistributionAdvisor {
           badIds.append(", ");
       }
       throw new IllegalStateException(
-          String.format("Illegal Region Configuration for members:  %s",
+          String.format("Illegal Region Configuration for members: %s",
               badIds.toString()));
     }
   }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/CacheServerImpl.java b/geode-core/src/main/java/org/apache/geode/internal/cache/CacheServerImpl.java
index 517c386..b0f4b51 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/CacheServerImpl.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/CacheServerImpl.java
@@ -110,7 +110,7 @@ public class CacheServerImpl extends AbstractCacheServer implements Distribution
   private volatile CacheServerAdvisor advisor;
 
   /**
-   * The monitor used to monitor load on this bridge server and distribute load to the locators
+   * The monitor used to monitor load on this cache server and distribute load to the locators
    *
    * @since GemFire 5.7
    */
@@ -157,7 +157,7 @@ public class CacheServerImpl extends AbstractCacheServer implements Distribution
   }
 
   /**
-   * Checks to see whether or not this bridge server is running. If so, an
+   * Checks to see whether or not this cache server is running. If so, an
    * {@link IllegalStateException} is thrown.
    */
   private void checkRunning() {
@@ -363,10 +363,10 @@ public class CacheServerImpl extends AbstractCacheServer implements Distribution
     // Force initialization on current cache
     ClientHealthMonitoringRegion.getInstance(this.cache);
     this.cache.getLogger()
-        .config(String.format("CacheServer Configuration:   %s", getConfig()));
+        .config(String.format("CacheServer Configuration: %s", getConfig()));
 
     /*
-     * If the stopped bridge server is restarted, we'll need to re-register the client membership
+     * If the stopped cache server is restarted, we'll need to re-register the client membership
      * listener. If the listener is already registered it won't be registered as would the case when
      * start() is invoked for the first time.
      */
@@ -395,7 +395,7 @@ public class CacheServerImpl extends AbstractCacheServer implements Distribution
 
 
   /**
-   * Gets the address that this bridge server can be contacted on from external processes.
+   * Gets the address that this cache server can be contacted on from external processes.
    *
    * @since GemFire 5.7
    */
@@ -406,7 +406,7 @@ public class CacheServerImpl extends AbstractCacheServer implements Distribution
   public String getExternalAddress(boolean checkServerRunning) {
     if (checkServerRunning) {
       if (!this.isRunning()) {
-        String s = "A bridge server's bind address is only available if it has been started";
+        String s = "A cache server's bind address is only available if it has been started";
         this.cache.getCancelCriterion().checkCancelInProgress(null);
         throw new IllegalStateException(s);
       }
@@ -711,7 +711,7 @@ public class CacheServerImpl extends AbstractCacheServer implements Distribution
   }
 
   /**
-   * Returns an array of all the groups of this bridge server. This includes those from the groups
+   * Returns an array of all the groups of this cache server. This includes those from the groups
    * gemfire property and those explicitly added to this server.
    */
   public String[] getCombinedGroups() {
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/CacheServerLauncher.java b/geode-core/src/main/java/org/apache/geode/internal/cache/CacheServerLauncher.java
index 407f17e2..8a66ebb 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/CacheServerLauncher.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/CacheServerLauncher.java
@@ -150,18 +150,18 @@ public class CacheServerLauncher {
     out.println("\t"
         + "<classpath> Location of user classes required by the cache server.  This path is appended to the current classpath.");
     out.println(
-        "\t" + "<attName> Distributed system attribute such as mcast-port or cache-xml-file.");
-    out.println("\t" + "-rebalance  Indicates that the Cache should immediately be rebalanced");
+        "\t<attName> Distributed system attribute such as mcast-port or cache-xml-file.");
+    out.println("\t-rebalance  Indicates that the Cache should immediately be rebalanced");
     out.println(
-        "\t" + "-disable-default-server  Do not add a default <cache-server>");
+        "\t-disable-default-server  Do not add a default <cache-server>");
     out.println("\t"
         + "<server-port>  Port the server is to listen on for client connections. This overrides the port set in the <cache-server> element of the cache-xml-file");
     out.println(
-        "\t" + "<server-bind-address>  Address the server is to listen on for client connections. This overrides the bind-address set in the <cache-server> element of the cache-xml-file");
+        "\t<server-bind-address>  Address the server is to listen on for client connections. This overrides the bind-address set in the <cache-server> element of the cache-xml-file");
     out.println(
-        "\t" + "<critical-heap-percentage>  Sets the critical heap threshold limit of the Resource Manager. This best works with parallel young generation collector (UseParNewGC) and concurrent low pause collector (UseConcMarkSweepGC) with appropriate CMSInitiatingOccupancyFraction like 50%. This overrides the critical-heap-percentage set in the <resource-manager> element of the cache-xml-file");
+        "\t<critical-heap-percentage>  Sets the critical heap threshold limit of the Resource Manager. This best works with parallel young generation collector (UseParNewGC) and concurrent low pause collector (UseConcMarkSweepGC) with appropriate CMSInitiatingOccupancyFraction like 50%. This overrides the critical-heap-percentage set in the <resource-manager> element of the cache-xml-file");
     out.println(
-        "\t" + "<eviction-heap-percentage>  Sets the eviction heap threshold limit of the Resource Manager above which the eviction should begin on Regions configured for eviction by heap LRU. This overrides the eviction-heap-percentage set in the resource-manager> element of the cache-xml-file");
+        "\t<eviction-heap-percentage>  Sets the eviction heap threshold limit of the Resource Manager above which the eviction should begin on Regions configured for eviction by heap LRU. This overrides the eviction-heap-percentage set in the <resource-manager> element of the cache-xml-file");
     out.println("\t"
         + "<critical-Off-heap-percentage>  Sets the critical off-heap threshold limit of the Resource Manager. This overrides the critical-off-heap-percentage set in the <resource-manager> element of the cache-xml-file");
     out.println("\t"
@@ -177,7 +177,7 @@ public class CacheServerLauncher {
         + "<workingdir> Directory in which cacheserver runs, default is the current directory");
     out.println();
     out.println("cacheserver status [-dir=<workingdir>]");
-    out.println("\t" + "Reports the status and process id of a GemFire CacheServer VM");
+    out.println("\tReports the status and process id of a GemFire CacheServer VM");
     out.println("\t"
         + "<workingdir> Directory in which cacheserver runs, default is the current directory");
   }
@@ -292,7 +292,7 @@ public class CacheServerLauncher {
 
     if (!inputWorkingDirectory.exists()) {
       throw new FileNotFoundException(
-          String.format("The input working directory does not exist:  %s",
+          String.format("The input working directory does not exist: %s",
               dirValue));
     }
 
@@ -367,7 +367,7 @@ public class CacheServerLauncher {
         }
       } else {
         throw new IllegalArgumentException(
-            String.format("Unknown argument:  %s", arg));
+            String.format("Unknown argument: %s", arg));
       }
     }
 
@@ -407,7 +407,7 @@ public class CacheServerLauncher {
   protected void processUnknownStartOption(final String key, final String value,
       final Map<String, Object> options, final List<String> vmArgs, final Properties props) {
     throw new IllegalArgumentException(
-        String.format("Unknown argument:  %s", key));
+        String.format("Unknown argument: %s", key));
   }
 
   /**
@@ -456,7 +456,7 @@ public class CacheServerLauncher {
         }
       } else {
         throw new IllegalArgumentException(
-            String.format("Unknown argument:  %s", arg));
+            String.format("Unknown argument: %s", arg));
       }
     }
 
@@ -478,7 +478,7 @@ public class CacheServerLauncher {
         processDirOption(options, arg.substring(arg.indexOf("=") + 1));
       } else {
         throw new IllegalArgumentException(
-            String.format("Unknown argument:  %s", arg));
+            String.format("Unknown argument: %s", arg));
       }
     }
 
@@ -1143,7 +1143,7 @@ public class CacheServerLauncher {
                 && !lastReadMessage.equals(lastReportedMessage)) {
               long elapsedSec = TimeUnit.NANOSECONDS.toSeconds(elapsed);
               System.out.println(String.format(
-                  "The server is still starting. %s seconds have elapsed since the last log message:  %s",
+                  "The server is still starting. %s seconds have elapsed since the last log message: %s",
                   elapsedSec, status.dsMsg));
               lastReportedMessage = lastReadMessage;
             }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/Conflatable.java b/geode-core/src/main/java/org/apache/geode/internal/cache/Conflatable.java
index 41ce253..0a31e2b 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/Conflatable.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/Conflatable.java
@@ -18,7 +18,7 @@ package org.apache.geode.internal.cache;
 import java.io.Serializable;
 
 /**
- * Interface <code>Conflatable</code> is used by the bridge server client notification mechanism to
+ * Interface <code>Conflatable</code> is used by the cache server client notification mechanism to
  * conflate messages being sent from the server to the client.
  *
  *
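
The Conflatable javadoc above describes conflation: when the server has several queued updates for the same entry, a newer value may replace an older one that has not yet been delivered to the client. The sketch below only illustrates that idea with a hypothetical ConflatingQueue class; it is not Geode's Conflatable interface or its queue implementation.

    import java.util.LinkedHashMap;
    import java.util.Map;

    // Hypothetical illustration of conflation: the latest value for a
    // "region/key" replaces an older, still-undelivered one.
    class ConflatingQueue {
      private final Map<String, Object> pending = new LinkedHashMap<>();

      synchronized void offer(String regionAndKey, Object latestValue) {
        pending.put(regionAndKey, latestValue); // overwrite instead of queueing both
      }

      synchronized Map<String, Object> drain() {
        Map<String, Object> batch = new LinkedHashMap<>(pending);
        pending.clear();
        return batch;
      }
    }
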
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/ControllerAdvisor.java b/geode-core/src/main/java/org/apache/geode/internal/cache/ControllerAdvisor.java
index 4bf5ba1..eb2ae12 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/ControllerAdvisor.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/ControllerAdvisor.java
@@ -27,7 +27,7 @@ import org.apache.geode.distributed.internal.membership.InternalDistributedMembe
 
 
 /**
- * Used to give advise to a connection controller. Bridge server currently need to know about
+ * Used to give advice to a connection controller. Cache servers currently need to know about
  * controller's
  *
  */
@@ -86,7 +86,7 @@ public class ControllerAdvisor extends GridAdvisor {
   }
 
   /**
-   * Describes a bridge server for distribution purposes.
+   * Describes a cache server for distribution purposes.
    */
   public static class ControllerProfile extends GridAdvisor.GridProfile {
 
@@ -102,9 +102,9 @@ public class ControllerAdvisor extends GridAdvisor {
     }
 
     /**
-     * Used to process an incoming connection controller profile. Any controller or bridge server in
+     * Used to process an incoming connection controller profile. Any controller or cache server in
      * this vm needs to be told about this incoming new controller. The reply needs to contain any
-     * controller(s) that exist in this vm and any bridge servers that exist in this vm.
+     * controller(s) that exist in this vm and any cache servers that exist in this vm.
      *
      * @since GemFire 5.7
      */
@@ -113,7 +113,7 @@ public class ControllerAdvisor extends GridAdvisor {
         boolean removeProfile, boolean exchangeProfiles, final List<Profile> replyProfiles) {
       // tell local controllers about this remote controller
       tellLocalControllers(removeProfile, exchangeProfiles, replyProfiles);
-      // tell local bridge servers about this remote controller
+      // tell local cache servers about this remote controller
       tellLocalBridgeServers(dm.getCache(), removeProfile, exchangeProfiles, replyProfiles);
     }
 
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/CreateRegionProcessor.java b/geode-core/src/main/java/org/apache/geode/internal/cache/CreateRegionProcessor.java
index bfc3776..7d0553b 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/CreateRegionProcessor.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/CreateRegionProcessor.java
@@ -403,7 +403,7 @@ public class CreateRegionProcessor implements ProfileExchangeProcessor {
         if (replyException == null) {
           replyException = new ReplyException(t);
         } else {
-          logger.warn(String.format("More than one exception thrown in  %s", this),
+          logger.warn(String.format("More than one exception thrown in %s", this),
               t);
         }
       } finally {
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/DiskInitFile.java b/geode-core/src/main/java/org/apache/geode/internal/cache/DiskInitFile.java
index 9bd96ed..7cf2d51 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/DiskInitFile.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/DiskInitFile.java
@@ -1296,7 +1296,7 @@ public class DiskInitFile implements DiskInitFileInterpreter {
       writeIFRecord(bb);
     } catch (IOException ex) {
       throw new DiskAccessException(
-          String.format("Failed saving instantiator to disk because:  %s",
+          String.format("Failed saving instantiator to disk because: %s",
               ex),
           this.parent);
     } finally {
@@ -1350,7 +1350,7 @@ public class DiskInitFile implements DiskInitFileInterpreter {
       writeIFRecord(bb);
     } catch (IOException ex) {
       throw new DiskAccessException(
-          String.format("Failed saving data serializer to disk because:  %s",
+          String.format("Failed saving data serializer to disk because: %s",
               ex),
           this.parent);
     } finally {
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/DiskStoreFactoryImpl.java b/geode-core/src/main/java/org/apache/geode/internal/cache/DiskStoreFactoryImpl.java
index dccef89..6eb01b3 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/DiskStoreFactoryImpl.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/DiskStoreFactoryImpl.java
@@ -106,7 +106,7 @@ public class DiskStoreFactoryImpl implements DiskStoreFactory {
     if (timeInterval < 0) {
       throw new IllegalArgumentException(
           String.format(
-              "Time Interval specified has to be a non-negative number and the value given  %s  is not acceptable",
+              "Time Interval specified has to be a non-negative number and the value given %s is not acceptable",
               timeInterval));
     }
     this.attrs.timeInterval = timeInterval;
@@ -261,7 +261,7 @@ public class DiskStoreFactoryImpl implements DiskStoreFactory {
     } else if (maxOplogSize < 0) {
       throw new IllegalArgumentException(
           String.format(
-              "Maximum Oplog size specified has to be a non-negative number and the value given  %s  is not acceptable",
+              "Maximum Oplog size specified has to be a non-negative number and the value given %s is not acceptable",
               maxOplogSize));
     }
     this.attrs.maxOplogSizeInBytes = maxOplogSize * (1024 * 1024);
@@ -275,7 +275,7 @@ public class DiskStoreFactoryImpl implements DiskStoreFactory {
     if (maxOplogSizeInBytes < 0) {
       throw new IllegalArgumentException(
           String.format(
-              "Maximum Oplog size specified has to be a non-negative number and the value given  %s  is not acceptable",
+              "Maximum Oplog size specified has to be a non-negative number and the value given %s is not acceptable",
               maxOplogSizeInBytes));
     }
     this.attrs.maxOplogSizeInBytes = maxOplogSizeInBytes;
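
The DiskStoreFactoryImpl hunks follow a single pattern: reject negative sizes and intervals with an IllegalArgumentException built by String.format, and convert the megabyte variant to bytes before storing it. A minimal sketch of that pattern, assuming a hypothetical OplogSizeConfig holder rather than the real factory:

    // Hypothetical holder mirroring the validate-then-store pattern above.
    class OplogSizeConfig {
      private long maxOplogSizeInBytes;

      void setMaxOplogSizeMegabytes(long maxOplogSize) {
        if (maxOplogSize < 0) {
          throw new IllegalArgumentException(String.format(
              "Maximum Oplog size specified has to be a non-negative number and the value given %s is not acceptable",
              maxOplogSize));
        }
        maxOplogSizeInBytes = maxOplogSize * 1024 * 1024; // store the limit in bytes
      }

      long getMaxOplogSizeInBytes() {
        return maxOplogSizeInBytes;
      }
    }
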
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/DiskStoreImpl.java b/geode-core/src/main/java/org/apache/geode/internal/cache/DiskStoreImpl.java
index a1a15f7..ffdc44a 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/DiskStoreImpl.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/DiskStoreImpl.java
@@ -922,7 +922,7 @@ public class DiskStoreImpl implements DiskStore {
       }
       throw new DiskAccessException(
           String.format(
-              "Data  for DiskEntry having DiskId as  %s  could not be obtained from Disk. A clear operation may have deleted the oplogs",
+              "Data  for DiskEntry having DiskId as %s could not be obtained from Disk. A clear operation may have deleted the oplogs",
               id),
           dr.getName());
     }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedCacheOperation.java b/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedCacheOperation.java
index eb74c76..aac3015 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedCacheOperation.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedCacheOperation.java
@@ -707,7 +707,7 @@ public abstract class DistributedCacheOperation {
       }
       throw e;
     } catch (RuntimeException e) {
-      logger.info(String.format("Exception occurred while processing  %s", this),
+      logger.info(String.format("Exception occurred while processing %s", this),
           e);
       throw e;
     } finally {
@@ -1264,7 +1264,7 @@ public abstract class DistributedCacheOperation {
           }
           sendReply(getSender(), processorId, rex, getReplySender(dm));
         } else if (thr != null) {
-          logger.error(String.format("Exception occurred while processing  %s",
+          logger.error(String.format("Exception occurred while processing %s",
               this),
               thr);
         }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedRegion.java
index fb146c8..f1174a0 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedRegion.java
@@ -978,7 +978,7 @@ public class DistributedRegion extends LocalRegion implements InternalDistribute
     if (!this.scope.isGlobal()) {
       throw new IllegalStateException(
           String.format(
-              "Distribution locks are only supported for regions with GLOBAL scope, not  %s",
+              "Distribution locks are only supported for regions with GLOBAL scope, not %s",
               this.scope));
     }
     return new RegionDistributedLock();
@@ -992,7 +992,7 @@ public class DistributedRegion extends LocalRegion implements InternalDistribute
     if (!this.scope.isGlobal()) {
       throw new IllegalStateException(
           String.format(
-              "Distribution locks are only supported for regions with GLOBAL scope, not  %s",
+              "Distribution locks are only supported for regions with GLOBAL scope, not %s",
               this.scope));
     }
     if (isLockingSuspendedByCurrentThread()) {
@@ -2216,7 +2216,7 @@ public class DistributedRegion extends LocalRegion implements InternalDistribute
     if (!this.scope.isGlobal()) {
       throw new IllegalStateException(
           String.format(
-              "Distribution locks are only supported for regions with GLOBAL scope, not  %s",
+              "Distribution locks are only supported for regions with GLOBAL scope, not %s",
               this.scope));
     }
 
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedRegionFunctionStreamingMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedRegionFunctionStreamingMessage.java
index 02272ff..d657774 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedRegionFunctionStreamingMessage.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedRegionFunctionStreamingMessage.java
@@ -140,10 +140,10 @@ public class DistributedRegionFunctionStreamingMessage extends DistributionMessa
         InternalCache cache = dm.getCache();
         if (cache != null) {
           thr = cache
-              .getCacheClosedException(String.format("Remote cache is closed:  %s",
+              .getCacheClosedException(String.format("Remote cache is closed: %s",
                   dm.getId()));
         } else {
-          thr = new CacheClosedException(String.format("Remote cache is closed:  %s",
+          thr = new CacheClosedException(String.format("Remote cache is closed: %s",
               dm.getId()));
         }
         return;
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/EntryEventImpl.java b/geode-core/src/main/java/org/apache/geode/internal/cache/EntryEventImpl.java
index a3a555d..a6db3db 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/EntryEventImpl.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/EntryEventImpl.java
@@ -2029,7 +2029,7 @@ public class EntryEventImpl implements InternalEntryEvent, InternalCacheEvent,
   public static byte[] serialize(Object obj, Version version) {
     if (obj == null || obj == Token.NOT_AVAILABLE || Token.isInvalidOrRemoved(obj))
       throw new IllegalArgumentException(
-          String.format("Must not serialize  %s  in this context.",
+          String.format("Must not serialize %s in this context.",
               obj));
     try {
       return BlobHelper.serializeToBlob(obj, version);
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/ExpiryTask.java b/geode-core/src/main/java/org/apache/geode/internal/cache/ExpiryTask.java
index 7607db0..43fdaac 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/ExpiryTask.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/ExpiryTask.java
@@ -292,7 +292,7 @@ public abstract class ExpiryTask extends SystemTimer.SystemTimerTask {
     if (action.isLocalDestroy())
       return localDestroy();
     throw new InternalGemFireError(
-        String.format("unrecognized expiration action:  %s", action));
+        String.format("unrecognized expiration action: %s", action));
   }
 
   /**
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/FilterProfile.java b/geode-core/src/main/java/org/apache/geode/internal/cache/FilterProfile.java
index b54490d..72f5f8c 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/FilterProfile.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/FilterProfile.java
@@ -305,10 +305,10 @@ public class FilterProfile implements DataSerializableFixedID {
       filterClass = ClassLoadUtil.classFromName((String) interest);
       filter = (InterestFilter) filterClass.newInstance();
     } catch (ClassNotFoundException cnfe) {
-      throw new RuntimeException(String.format("Class  %s  not found in classpath.",
+      throw new RuntimeException(String.format("Class %s not found in classpath.",
           interest), cnfe);
     } catch (Exception e) {
-      throw new RuntimeException(String.format("Class  %s  could not be instantiated.",
+      throw new RuntimeException(String.format("Class %s could not be instantiated.",
           interest), e);
     }
     Map interestMap = filts.get(clientID);
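
The FilterProfile hunk above loads an InterestFilter class by name, instantiates it reflectively, and wraps the two failure modes in RuntimeExceptions carrying the reworded messages. A self-contained sketch of that pattern using plain java.lang.Class instead of Geode's ClassLoadUtil:

    // Standalone sketch: load a class by name, instantiate it, wrap failures.
    final class ReflectiveFactory {
      static <T> T newInstanceOf(String className, Class<T> expectedType) {
        try {
          Class<?> clazz = Class.forName(className);
          return expectedType.cast(clazz.getDeclaredConstructor().newInstance());
        } catch (ClassNotFoundException cnfe) {
          throw new RuntimeException(
              String.format("Class %s not found in classpath.", className), cnfe);
        } catch (ReflectiveOperationException e) {
          throw new RuntimeException(
              String.format("Class %s could not be instantiated.", className), e);
        }
      }
    }
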
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/FindDurableQueueProcessor.java b/geode-core/src/main/java/org/apache/geode/internal/cache/FindDurableQueueProcessor.java
index aa95f13..95b5e3a 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/FindDurableQueueProcessor.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/FindDurableQueueProcessor.java
@@ -41,7 +41,7 @@ import org.apache.geode.internal.logging.LogService;
 
 /**
  * A processor for helping a locator find the durable queues for a given durable client id. Asks
- * each bridge server if they have the durable id and builds a list of matching servers.
+ * each cache server whether it has the durable id and builds a list of matching servers.
  *
  * @since GemFire 5.7
  */
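
The comment above describes the locator-side lookup: ask each cache server whether it hosts the durable client's queue and collect the servers that answer yes. A hedged sketch of that collect-the-matches step, with a hypothetical DurableQueueHost interface standing in for the real reply messages:

    import java.util.ArrayList;
    import java.util.List;

    // Hypothetical stand-in; the real processor exchanges distribution messages.
    interface DurableQueueHost {
      boolean hasDurableQueueFor(String durableClientId);
      String name();
    }

    final class DurableQueueFinder {
      static List<String> serversWithQueue(List<DurableQueueHost> servers, String durableClientId) {
        List<String> matches = new ArrayList<>();
        for (DurableQueueHost server : servers) {
          if (server.hasDurableQueueFor(durableClientId)) {
            matches.add(server.name()); // build the list of matching servers
          }
        }
        return matches;
      }
    }
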
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/GemFireCacheImpl.java b/geode-core/src/main/java/org/apache/geode/internal/cache/GemFireCacheImpl.java
index ceb93c4..25ffb14 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/GemFireCacheImpl.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/GemFireCacheImpl.java
@@ -945,7 +945,7 @@ public class GemFireCacheImpl implements InternalCache, InternalClientCache, Has
       // uncomment following line when debugging CacheExistsException
       if (DEBUG_CREATION_STACK) {
         this.creationStack = new Exception(
-            String.format("Created GemFireCache  %s", toString()));
+            String.format("Created GemFireCache %s", toString()));
       }
 
       this.txEntryStateFactory = TXEntryState.getFactory();
@@ -3489,7 +3489,7 @@ public class GemFireCacheImpl implements InternalCache, InternalClientCache, Has
     Object old = this.reinitializingRegions.putIfAbsent(fullPath, new FutureResult(this.stopper));
     if (old != null) {
       throw new IllegalStateException(
-          String.format("Found an existing reinitalizing region named  %s",
+          String.format("Found an existing reinitalizing region named %s",
               fullPath));
     }
   }
@@ -3505,7 +3505,7 @@ public class GemFireCacheImpl implements InternalCache, InternalClientCache, Has
     FutureResult future = (FutureResult) this.reinitializingRegions.get(regionName);
     if (future == null) {
       throw new IllegalStateException(
-          String.format("Could not find a reinitializing region named  %s",
+          String.format("Could not find a reinitializing region named %s",
               regionName));
     }
     future.set(region);
@@ -3779,7 +3779,7 @@ public class GemFireCacheImpl implements InternalCache, InternalClientCache, Has
         this.allGatewaySenders = Collections.unmodifiableSet(newSenders);
       } else {
         throw new IllegalStateException(
-            String.format("A GatewaySender with id  %s  is already defined in this cache.",
+            String.format("A GatewaySender with id %s is already defined in this cache.",
                 sender.getId()));
       }
     }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/GridAdvisor.java b/geode-core/src/main/java/org/apache/geode/internal/cache/GridAdvisor.java
index 3c15fe9..a9fcb4f 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/GridAdvisor.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/GridAdvisor.java
@@ -80,7 +80,7 @@ public abstract class GridAdvisor extends DistributionAdvisor {
   }
 
   /**
-   * Return an unmodifiable Set<DistributedMember> of the bridge servers in this system.
+   * Return an unmodifiable Set<DistributedMember> of the cache servers in this system.
    */
   public Set adviseBridgeServers() {
     Set/* <DistributedMember> */ result = this.cachedBridgeServerAdvise;
@@ -221,7 +221,7 @@ public abstract class GridAdvisor extends DistributionAdvisor {
     try {
       new UpdateAttributesProcessor(getAdvisee(), true/* removeProfile */).distribute();
 
-      // Notify any local bridge servers or controllers
+      // Notify any local cache servers or controllers
       // that we are closing.
       GridProfile profile = (GridProfile) createProfile();
       profile.tellLocalBridgeServers(getDistributionManager().getCache(), true, false, null);
@@ -321,8 +321,8 @@ public abstract class GridAdvisor extends DistributionAdvisor {
     }
 
     /**
-     * Tell local bridge servers about the received profile. Also if exchange profiles then add each
-     * local bridge server to reply.
+     * Tell local cache servers about the received profile. Also, if exchanging profiles, add each
+     * local cache server to the reply.
      *
      * @since GemFire 5.7
      */
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/LocalDataSet.java b/geode-core/src/main/java/org/apache/geode/internal/cache/LocalDataSet.java
index 1544baa..5bd6aab 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/LocalDataSet.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/LocalDataSet.java
@@ -149,7 +149,7 @@ public class LocalDataSet implements Region, QueryExecutor {
     }
     if (result.size() > 1) {
       throw new FunctionDomainException(
-          String.format("selectValue expects results of size 1, but found results of size  %s",
+          String.format("selectValue expects results of size 1, but found results of size %s",
               Integer.valueOf(result.size())));
     }
     return result.iterator().next();
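
The LocalDataSet hunk above shows part of the selectValue contract: more than one result is an error, and a single result is returned. A standalone sketch of that contract, with IllegalStateException standing in for FunctionDomainException and a null return for the empty case (an assumption, not shown in the hunk):

    import java.util.Collection;

    // Sketch of the "at most one result" contract used by selectValue above.
    final class SingleResult {
      static <T> T selectOnly(Collection<T> results) {
        if (results.isEmpty()) {
          return null; // assumed behaviour for the empty case
        }
        if (results.size() > 1) {
          throw new IllegalStateException(String.format(
              "selectValue expects results of size 1, but found results of size %s", results.size()));
        }
        return results.iterator().next();
      }
    }
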
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/LocalRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/LocalRegion.java
index 81e3640..6a8b6ab 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/LocalRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/LocalRegion.java
@@ -2132,7 +2132,7 @@ public class LocalRegion extends AbstractRegion implements LoaderHelperFactory,
       DataPolicy dp = getDataPolicy();
       if (dp.isEmpty()) {
         throw new IllegalStateException(
-            String.format("Cannot write a region with data-policy  %s  to disk.",
+            String.format("Cannot write a region with data-policy %s to disk.",
                 dp));
       } else if (!dp.withPersistence() && !isOverflowEnabled()) {
         throw new IllegalStateException(
@@ -2757,7 +2757,7 @@ public class LocalRegion extends AbstractRegion implements LoaderHelperFactory,
     checkReadiness();
     // Localized string for partitioned region is generic enough for general use
     throw new EntryNotFoundException(
-        String.format("Entry not found for key  %s", entryKey));
+        String.format("Entry not found for key %s", entryKey));
   }
 
   /**
@@ -3593,7 +3593,7 @@ public class LocalRegion extends AbstractRegion implements LoaderHelperFactory,
   public void saveSnapshot(OutputStream outputStream) throws IOException {
     if (isProxy()) {
       throw new UnsupportedOperationException(
-          String.format("Regions with DataPolicy  %s  do not support saveSnapshot.",
+          String.format("Regions with DataPolicy %s do not support saveSnapshot.",
               getDataPolicy()));
     }
     checkForNoAccess();
@@ -3643,7 +3643,7 @@ public class LocalRegion extends AbstractRegion implements LoaderHelperFactory,
       throws CacheWriterException, TimeoutException, ClassNotFoundException, IOException {
     if (isProxy()) {
       throw new UnsupportedOperationException(
-          String.format("Regions with DataPolicy  %s  do not support loadSnapshot.",
+          String.format("Regions with DataPolicy %s do not support loadSnapshot.",
               getDataPolicy()));
     }
     if (inputStream == null) {
@@ -4045,7 +4045,7 @@ public class LocalRegion extends AbstractRegion implements LoaderHelperFactory,
           "InterestType.OQL_QUERY not yet supported");
 
     } else {
-      throw new IllegalArgumentException(String.format("Unsupported interest type:  %s",
+      throw new IllegalArgumentException(String.format("Unsupported interest type: %s",
           interestType));
     }
     return ret;
@@ -4206,10 +4206,10 @@ public class LocalRegion extends AbstractRegion implements LoaderHelperFactory,
       filter = (InterestFilter) filterClass.newInstance();
     } catch (ClassNotFoundException cnfe) {
       throw new RuntimeException(
-          String.format("Class  %s  not found in classpath.", key), cnfe);
+          String.format("Class %s not found in classpath.", key), cnfe);
     } catch (Exception e) {
       throw new RuntimeException(
-          String.format("Class  %s  could not be instantiated.", key), e);
+          String.format("Class %s could not be instantiated.", key), e);
     }
 
     for (Object entryObject : entrySet(false)) {
@@ -4797,7 +4797,7 @@ public class LocalRegion extends AbstractRegion implements LoaderHelperFactory,
     RegionEntry regionEntry = this.entries.initRecoveredEntry(key, re);
     if (regionEntry == null) {
       throw new InternalGemFireError(
-          String.format("Entry already existed:  %s", key));
+          String.format("Entry already existed: %s", key));
     }
     return (DiskEntry) regionEntry;
   }
@@ -5315,7 +5315,7 @@ public class LocalRegion extends AbstractRegion implements LoaderHelperFactory,
 
   /**
    * Perform an update in a bridge client. See CacheClientUpdater.handleUpdate() The op is from the
-   * bridge server and should not be distributed back to it.
+   * cache server and should not be distributed back to it.
    */
   public void basicBridgeClientUpdate(DistributedMember serverId, Object key, Object value,
       byte[] deltaBytes, boolean isObject, Object callbackArgument, boolean isCreate,
@@ -5373,7 +5373,7 @@ public class LocalRegion extends AbstractRegion implements LoaderHelperFactory,
   }
 
   /**
-   * Perform an invalidate in a bridge client. The op is from the bridge server and should not be
+   * Perform an invalidate in a bridge client. The op is from the cache server and should not be
    * distributed back to it.
    */
   public void basicBridgeClientInvalidate(DistributedMember serverId, Object key,
@@ -5422,7 +5422,7 @@ public class LocalRegion extends AbstractRegion implements LoaderHelperFactory,
   }
 
   /**
-   * Perform a destroy in a bridge client. The op is from the bridge server and should not be
+   * Perform a destroy in a bridge client. The op is from the cache server and should not be
    * distributed back to it.
    */
   public void basicBridgeClientDestroy(DistributedMember serverId, Object key,
@@ -6111,7 +6111,7 @@ public class LocalRegion extends AbstractRegion implements LoaderHelperFactory,
     if (shouldNotifyBridgeClients()) {
       if (numBS > 0) {
         if (logger.isDebugEnabled()) {
-          logger.debug("{}: notifying {} bridge servers of event: {}", this.getName(), numBS,
+          logger.debug("{}: notifying {} cache servers of event: {}", this.getName(), numBS,
               event);
         }
       }
@@ -9338,7 +9338,7 @@ public class LocalRegion extends AbstractRegion implements LoaderHelperFactory,
   }
 
   /**
-   * Called on a bridge server when it has a received a putAll command from a client.
+   * Called on a cache server when it has received a putAll command from a client.
    *
    * @param map a map of key->value for the entries we are putting
    * @param retryVersions a map of key->version tag. If any of the entries are the result of a
@@ -9376,7 +9376,7 @@ public class LocalRegion extends AbstractRegion implements LoaderHelperFactory,
   }
 
   /**
-   * Called on a bridge server when it has a received a removeAll command from a client.
+   * Called on a cache server when it has received a removeAll command from a client.
    *
    * @param keys a collection of the keys we are putting
    * @param retryVersions a collection of version tags. If the client is retrying a key then that
@@ -9504,7 +9504,7 @@ public class LocalRegion extends AbstractRegion implements LoaderHelperFactory,
     boolean partialResult = false;
     RuntimeException runtimeException = null;
     if (hasServerProxy()) {
-      // send message to bridge server
+      // send message to cache server
       if (isTX()) {
         TXStateProxyImpl txState = (TXStateProxyImpl) this.cache.getTxManager().getTXState();
         txState.getRealDeal(null, this);
@@ -9725,7 +9725,7 @@ public class LocalRegion extends AbstractRegion implements LoaderHelperFactory,
     RuntimeException runtimeException = null;
 
     if (hasServerProxy()) {
-      // send message to bridge server
+      // send message to cache server
       if (isTX()) {
         TXStateProxyImpl txState = (TXStateProxyImpl) this.cache.getTxManager().getTXState();
         txState.getRealDeal(null, this);
@@ -10536,9 +10536,9 @@ public class LocalRegion extends AbstractRegion implements LoaderHelperFactory,
 
   /**
    * Used to prevent notification of bridge clients, typically used for internal "meta" regions and
-   * if the cache doesn't have any bridge servers
+   * if the cache doesn't have any cache servers
    *
-   * @return true only if it's cache has bridge servers and this is nt a meta region
+   * @return true only if its cache has cache servers and this is not a meta region
    */
   @Override
   public boolean shouldNotifyBridgeClients() {
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/MemberFunctionStreamingMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/MemberFunctionStreamingMessage.java
index 4cfedd7..bc409a3 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/MemberFunctionStreamingMessage.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/MemberFunctionStreamingMessage.java
@@ -169,11 +169,11 @@ public class MemberFunctionStreamingMessage extends DistributionMessage
           if (checkCacheClosing(dm) || checkDSClosing(dm)) {
             if (dm.getCache() == null) {
               thr = new CacheClosedException(
-                  String.format("Remote cache is closed:  %s",
+                  String.format("Remote cache is closed: %s",
                       dm.getId()));
             } else {
               dm.getCache().getCacheClosedException(
-                  String.format("Remote cache is closed:  %s",
+                  String.format("Remote cache is closed: %s",
                       dm.getId()));
             }
             return;
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/Oplog.java b/geode-core/src/main/java/org/apache/geode/internal/cache/Oplog.java
index 60371e4..214fa71 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/Oplog.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/Oplog.java
@@ -591,7 +591,7 @@ public class Oplog implements CompactableOplog, Flushable {
         throw (DiskAccessException) ex;
       }
       throw new DiskAccessException(
-          String.format("Failed creating operation log because:  %s", ex),
+          String.format("Failed creating operation log because: %s", ex),
           getParent());
     }
   }
@@ -655,7 +655,7 @@ public class Oplog implements CompactableOplog, Flushable {
         throw (DiskAccessException) ex;
       }
       throw new DiskAccessException(
-          String.format("Failed creating operation log because:  %s", ex),
+          String.format("Failed creating operation log because: %s", ex),
           getParent());
     }
   }
@@ -948,7 +948,7 @@ public class Oplog implements CompactableOplog, Flushable {
     } catch (IOException ex) {
       getParent().getCancelCriterion().checkCancelInProgress(ex);
       throw new DiskAccessException(
-          String.format("Failed creating operation log because:  %s", ex),
+          String.format("Failed creating operation log because: %s", ex),
           getParent());
     }
     if (hasNoLiveValues() && !offline) {
@@ -1254,7 +1254,7 @@ public class Oplog implements CompactableOplog, Flushable {
     if (bb == null) {
       throw new EntryDestroyedException(
           String.format(
-              "No value was found for entry with disk Id  %s on a region  with synchronous writing set to %s",
+              "No value was found for entry with disk Id %s on a region  with synchronous writing set to %s",
               new Object[] {id, dr.isSync()}));
     }
     if (bitOnly) {
@@ -1520,7 +1520,7 @@ public class Oplog implements CompactableOplog, Flushable {
 
               default:
                 throw new DiskAccessException(
-                    String.format("Unknown opCode  %s  found in disk operation log.",
+                    String.format("Unknown opCode %s found in disk operation log.",
                         opCode),
                     getParent());
             }
@@ -1922,7 +1922,7 @@ public class Oplog implements CompactableOplog, Flushable {
               break;
             default:
               throw new DiskAccessException(
-                  String.format("Unknown opCode  %s  found in disk operation log.",
+                  String.format("Unknown opCode %s found in disk operation log.",
                       opCode),
                   getParent());
           }
@@ -2040,7 +2040,7 @@ public class Oplog implements CompactableOplog, Flushable {
       byte opCode = dis.readByte();
       if (opCode != OPLOG_GEMFIRE_VERSION) {
         throw new DiskAccessException(
-            String.format("Unknown opCode  %s  found in disk operation log.",
+            String.format("Unknown opCode %s found in disk operation log.",
                 opCode),
             getParent());
       }
@@ -2055,7 +2055,7 @@ public class Oplog implements CompactableOplog, Flushable {
       byte opCode = dis.readByte();
       if (opCode != OPLOG_GEMFIRE_VERSION) {
         throw new DiskAccessException(
-            String.format("Unknown opCode  %s  found in disk operation log.",
+            String.format("Unknown opCode %s found in disk operation log.",
                 opCode),
             getParent());
       }
@@ -5337,7 +5337,7 @@ public class Oplog implements CompactableOplog, Flushable {
                   dr.getName());
             } else if (offsetInOplog < 0) {
               throw new DiskAccessException(
-                  String.format("Cannot find record  %s  when reading from %s",
+                  String.format("Cannot find record %s when reading from %s",
                       offsetInOplog, this.diskFile.getPath()),
                   dr.getName());
             }
@@ -5428,7 +5428,7 @@ public class Oplog implements CompactableOplog, Flushable {
         getParent().getCancelCriterion().checkCancelInProgress(ex);
         throw new DiskAccessException(
             String.format(
-                "Failed reading from %s.  oplogID, %s  Offset being read= %s Current Oplog Size= %s Actual File Size, %s IS ASYNCH MODE, %s IS ASYNCH WRITER ALIVE= %s",
+                "Failed reading from %s.  oplogID, %s Offset being read= %s Current Oplog Size= %s Actual File Size, %s IS ASYNCH MODE, %s IS ASYNCH WRITER ALIVE= %s",
                 this.diskFile.getPath(), this.oplogId, offsetInOplog,
                 this.crf.currSize, this.crf.bytesFlushed, !dr.isSync(), Boolean.FALSE),
             ex, dr.getName());
@@ -5496,7 +5496,7 @@ public class Oplog implements CompactableOplog, Flushable {
                 dr.getName());
           } else if (offsetInOplog < 0) {
             throw new DiskAccessException(
-                String.format("Cannot find record  %s  when reading from %s",
+                String.format("Cannot find record %s when reading from %s",
                     offsetInOplog, this.diskFile.getPath()),
                 dr.getName());
           }
@@ -5526,7 +5526,7 @@ public class Oplog implements CompactableOplog, Flushable {
         getParent().getCancelCriterion().checkCancelInProgress(ex);
         throw new DiskAccessException(
             String.format(
-                "Failed reading from %s.  oplogID, %s Offset being read=%s Current Oplog Size=%s  Actual File Size,%s IS ASYNCH MODE,%s IS ASYNCH WRITER ALIVE=%s",
+                "Failed reading from %s.  oplogID, %s Offset being read=%s Current Oplog Size=%s Actual File Size,%s IS ASYNCH MODE,%s IS ASYNCH WRITER ALIVE=%s",
                 this.diskFile.getPath(), this.oplogId, offsetInOplog,
                 this.crf.currSize, this.crf.bytesFlushed, Boolean.FALSE, Boolean.FALSE),
             ex, dr.getName());
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/OverflowOplog.java b/geode-core/src/main/java/org/apache/geode/internal/cache/OverflowOplog.java
index d612215..7a0c3c3 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/OverflowOplog.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/OverflowOplog.java
@@ -137,7 +137,7 @@ class OverflowOplog implements CompactableOplog, Flushable {
       createCrf(parent.getActiveOverflowOplog());
     } catch (IOException ex) {
       throw new DiskAccessException(
-          String.format("Failed creating operation log because:  %s", ex),
+          String.format("Failed creating operation log because: %s", ex),
           this.parent);
     }
   }
@@ -295,7 +295,7 @@ class OverflowOplog implements CompactableOplog, Flushable {
     if (bb == null) {
       throw new EntryDestroyedException(
           String.format(
-              "No value was found for entry with disk Id  %s on a region  with synchronous writing set to %s",
+              "No value was found for entry with disk Id %s on a region  with synchronous writing set to %s",
               new Object[] {id, dr.isSync()}));
     }
     if (bitOnly) {
@@ -903,7 +903,7 @@ class OverflowOplog implements CompactableOplog, Flushable {
       } catch (IOException ex) {
         throw new DiskAccessException(
             String.format(
-                "Failed reading from %s.  oplogID, %s  Offset being read= %s Current Oplog Size= %s Actual File Size, %s IS ASYNCH MODE, %s IS ASYNCH WRITER ALIVE= %s",
+                "Failed reading from %s.  oplogID, %s Offset being read= %s Current Oplog Size= %s Actual File Size, %s IS ASYNCH MODE, %s IS ASYNCH WRITER ALIVE= %s",
                 this.diskFile.getPath(), (long) this.oplogId, offsetInOplog,
                 this.crf.currSize, this.crf.bytesFlushed, !dr.isSync(), false),
             ex, dr.getName());
@@ -1437,7 +1437,7 @@ class OverflowOplog implements CompactableOplog, Flushable {
                 getParent().getName());
           } else if (offsetInOplog < 0) {
             throw new DiskAccessException(
-                String.format("Cannot find record  %s  when reading from %s",
+                String.format("Cannot find record %s when reading from %s",
                     offsetInOplog, this.diskFile.getPath()),
                 getParent().getName());
           }
@@ -1470,7 +1470,7 @@ class OverflowOplog implements CompactableOplog, Flushable {
       } catch (IOException ex) {
         throw new DiskAccessException(
             String.format(
-                "Failed reading from %s.  oplogID, %s Offset being read=%s Current Oplog Size=%s  Actual File Size,%s IS ASYNCH MODE,%s IS ASYNCH WRITER ALIVE=%s",
+                "Failed reading from %s.  oplogID, %s Offset being read=%s Current Oplog Size=%s Actual File Size,%s IS ASYNCH MODE,%s IS ASYNCH WRITER ALIVE=%s",
                 this.diskFile.getPath(), (long) this.oplogId, offsetInOplog,
                 this.crf.currSize, this.crf.bytesFlushed, false, false),
             ex, getParent().getName());
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/PRQueryProcessor.java b/geode-core/src/main/java/org/apache/geode/internal/cache/PRQueryProcessor.java
index 7724cb3..1e05065 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/PRQueryProcessor.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/PRQueryProcessor.java
@@ -169,7 +169,7 @@ public class PRQueryProcessor {
 
           } catch (TimeoutException e) {
             throw new InternalGemFireException(
-                String.format("Timed out while executing query, time exceeded  %s",
+                String.format("Timed out while executing query, time exceeded %s",
                     BUCKET_QUERY_TIMEOUT),
                 e);
           } catch (ExecutionException ee) {
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionRegionConfigValidator.java b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionRegionConfigValidator.java
index 0a8499f..4cb3906 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionRegionConfigValidator.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionRegionConfigValidator.java
@@ -73,7 +73,7 @@ public class PartitionRegionConfigValidator {
     }
     if (userPA.getRedundantCopies() != prconfPA.getRedundantCopies()) {
       throw new IllegalStateException(
-          String.format("Requested redundancy  %s  is incompatible with existing redundancy  %s",
+          String.format("Requested redundancy %s is incompatible with existing redundancy %s",
               new Object[] {Integer.valueOf(userPA.getRedundantCopies()),
                   Integer.valueOf(prconfPA.getRedundantCopies())}));
     }
@@ -87,7 +87,7 @@ public class PartitionRegionConfigValidator {
     if (!myScope.equals(prconfScope)) {
       throw new IllegalStateException(
           String.format(
-              "Scope in PartitionAttributes is incompatible with already set scope.Set the scope to  %s .",
+              "Scope in PartitionAttributes is incompatible with already set scope.Set the scope to %s .",
               prconfScope));
     }
 
@@ -95,7 +95,7 @@ public class PartitionRegionConfigValidator {
     if (userPA.getTotalNumBuckets() != prconfTotalNumBuckets) {
       throw new IllegalStateException(
           String.format(
-              "The total number of buckets found in PartitionAttributes ( %s ) is incompatible with the total number of buckets used by other distributed members. Set the number of buckets to  %s",
+              "The total number of buckets found in PartitionAttributes ( %s ) is incompatible with the total number of buckets used by other distributed members. Set the number of buckets to %s",
               new Object[] {Integer.valueOf(userPA.getTotalNumBuckets()),
                   Integer.valueOf(prconfTotalNumBuckets)}));
     }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java
index da7f58f..1911049 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java
@@ -606,18 +606,18 @@ public class PartitionedRegion extends LocalRegion
       Object o = super.get(key);
       if (o == DESTROYED) {
         throw new RegionDestroyedException(
-            String.format("Region for prId= %s  is destroyed",
+            String.format("Region for prId= %s is destroyed",
                 key),
             NO_PATH_FOUND);
       }
       if (o == LOCALLY_DESTROYED) {
         throw new PRLocallyDestroyedException(
-            String.format("Region with prId= %s  is locally destroyed on this node",
+            String.format("Region with prId= %s is locally destroyed on this node",
                 key));
       }
       if (o == FAILED_REGISTRATION) {
         throw new PRLocallyDestroyedException(
-            String.format("Region with prId= %s  failed initialization on this node",
+            String.format("Region with prId= %s failed initialization on this node",
                 key));
       }
       return o;
@@ -1175,7 +1175,7 @@ public class PartitionedRegion extends LocalRegion
   }
 
   /**
-   * receive notification that a bridge server or wan gateway has been created that requires
+   * Receive notification that a cache server or WAN gateway has been created that requires
    * notification of cache events from this region
    */
   public void cacheRequiresNotification() {
@@ -1948,7 +1948,7 @@ public class PartitionedRegion extends LocalRegion
         throw e;
       } catch (QueryException qe) {
         throw new QueryInvocationTargetException(
-            String.format("Unexpected query exception occurred during query execution  %s",
+            String.format("Unexpected query exception occurred during query execution %s",
                 qe.getMessage()),
             qe);
       } finally {
@@ -3076,7 +3076,7 @@ public class PartitionedRegion extends LocalRegion
         int red = getRegionAdvisor().getBucketRedundancy(bucketId);
         final TimeoutException noTime = new TimeoutException(
             String.format(
-                "Attempt to acquire primary node for write on bucket  %s  timed out in  %s  ms. Current redundancy [ %s ] does not satisfy minimum [ %s ]",
+                "Attempt to acquire primary node for write on bucket %s timed out in %s ms. Current redundancy [ %s ] does not satisfy minimum [ %s ]",
                 new Object[] {bucketStringForLogs(bucketId),
                     localSnoozer.getRetryTime(), red, this.minimumWriteRedundancy}));
         checkReadiness();
@@ -3256,7 +3256,7 @@ public class PartitionedRegion extends LocalRegion
       } catch (CacheException ce) {
         // Fix for bug 36014
         throw new PartitionedRegionDistributionException(
-            String.format("Putting entry on  %s  failed",
+            String.format("Putting entry on %s failed",
                 recipient),
             ce);
       }
@@ -4330,7 +4330,7 @@ public class PartitionedRegion extends LocalRegion
             checkReadiness();
             throw new TimeoutException(
                 String.format(
-                    "Attempt to acquire primary node for read on bucket  %s  timed out in  %s  ms",
+                    "Attempt to acquire primary node for read on bucket %s timed out in %s ms",
                     new Object[] {getBucketName(buck), snoozer.getRetryTime()}));
           }
           snoozer.waitToRetryNode();
@@ -5197,11 +5197,11 @@ public class PartitionedRegion extends LocalRegion
           if (getRegionAdvisor().getBucket(bucketId).getBucketAdvisor()
               .basicGetPrimaryMember() == null) {
             throw new EntryNotFoundException(
-                String.format("Entry not found for key  %s",
+                String.format("Entry not found for key %s",
                     event.getKey()));
           }
           TimeoutException e = new TimeoutException(
-              String.format("Time out looking for target node for destroy; waited  %s  ms",
+              String.format("Time out looking for target node for destroy; waited %s ms",
                   retryTime.getRetryTime()));
           if (logger.isDebugEnabled()) {
             logger.debug(e.getMessage(), e);
@@ -5401,7 +5401,7 @@ public class PartitionedRegion extends LocalRegion
         throw e;
       } catch (CacheException ce) {
         throw new PartitionedRegionException(
-            String.format("Destroy of entry on  %s  failed",
+            String.format("Destroy of entry on %s failed",
                 recipient),
             ce);
       } catch (RegionDestroyedException ignore) {
@@ -5660,7 +5660,7 @@ public class PartitionedRegion extends LocalRegion
           if (getRegionAdvisor().isStorageAssignedForBucket(bucketId)) {
             // bucket no longer exists
             throw new EntryNotFoundException(
-                String.format("Entry not found for key  %s",
+                String.format("Entry not found for key %s",
                     event.getKey()));
           }
           break; // fall out to failed exception
@@ -5771,7 +5771,7 @@ public class PartitionedRegion extends LocalRegion
         throw e;
       } catch (CacheException ce) {
         throw new PartitionedRegionException(
-            String.format("Invalidation of entry on  %s  failed",
+            String.format("Invalidation of entry on %s failed",
                 recipient),
             ce);
       }
@@ -5825,7 +5825,7 @@ public class PartitionedRegion extends LocalRegion
         throw e;
       } catch (CacheException ce) {
         throw new PartitionedRegionException(
-            String.format("Create of entry on  %s  failed",
+            String.format("Create of entry on %s failed",
                 recipient),
             ce);
       } catch (RegionDestroyedException rde) {
@@ -6931,7 +6931,7 @@ public class PartitionedRegion extends LocalRegion
   public void checkClosed() {
     if (this.isClosed) {
       throw new RegionDestroyedException(
-          String.format("PR  %s  is locally closed", this),
+          String.format("PR %s is locally closed", this),
           getFullPath());
     }
   }
@@ -8222,7 +8222,7 @@ public class PartitionedRegion extends LocalRegion
       if (getLocalMaxMemory() != 0) {
         throw new IndexCreationException(
             String.format(
-                "Data Store on this vm is null and the local max Memory is not zero, the data policy is  %s  and the localMaxMemeory is :  %s",
+                "Data Store on this vm is null and the local max Memory is not zero, the data policy is %s and the localMaxMemeory is : %s",
                 getDataPolicy(), (long) getLocalMaxMemory()));
       }
       // Not have to do anything since the region is just an Accessor and
@@ -8362,7 +8362,7 @@ public class PartitionedRegion extends LocalRegion
       if (getLocalMaxMemory() != 0) {
         throw new IndexCreationException(
             String.format(
-                "Data Store on this vm is null and the local max Memory is not zero, the data policy is  %s  and the localMaxMemeory is :  %s",
+                "Data Store on this vm is null and the local max Memory is not zero, the data policy is %s and the localMaxMemeory is : %s",
                 getDataPolicy(), (long) getLocalMaxMemory()));
       }
       // Not have to do anything since the region is just an Accessor and
@@ -8594,7 +8594,7 @@ public class PartitionedRegion extends LocalRegion
               .setRemoteBucketesIndexed(remoteIndexBucketsMap.get(ind.getName()));
         }
       } catch (ForceReattemptException e) {
-        logger.info(String.format("ForceReattempt exception :  %s", e));
+        logger.info(String.format("ForceReattempt exception : %s", e));
       }
     }
   }
@@ -8686,7 +8686,7 @@ public class PartitionedRegion extends LocalRegion
     // Check if the returned value is instance of Index (this means the index is
     // not in create phase, its created successfully).
     if (prIndex == null || !(prIndex instanceof Index)) {
-      logger.info("This index  %s is not on this partitoned region :  {}",
+      logger.info("This index %s is not on this partitoned region :  {}",
           ind, this);
       return numBuckets;
     }
@@ -9788,7 +9788,7 @@ public class PartitionedRegion extends LocalRegion
           if (getRegionAdvisor().isStorageAssignedForBucket(bucketId)) {
             // bucket no longer exists
             throw new EntryNotFoundException(
-                String.format("Entry not found for key  %s",
+                String.format("Entry not found for key %s",
                     event.getKey()));
           }
           break; // fall out to failed exception
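
Several PartitionedRegion hunks above reword timeout messages produced by a retry loop: the operation repeatedly looks for a primary (or target) node, backs off between attempts, and gives up with a timeout exception that reports the elapsed wait. A generic sketch of that retry-until-deadline shape; the names here are illustrative, and a plain RuntimeException stands in for Geode's TimeoutException:

    import java.util.concurrent.TimeUnit;
    import java.util.function.Supplier;

    // Illustrative retry-until-deadline loop behind the timeout messages above.
    final class PrimaryWaiter {
      static <T> T waitFor(String bucketName, Supplier<T> lookup, long timeoutMs, long pollMs)
          throws InterruptedException {
        long deadline = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(timeoutMs);
        while (System.nanoTime() < deadline) {
          T candidate = lookup.get();
          if (candidate != null) {
            return candidate; // a node was found within the retry budget
          }
          Thread.sleep(pollMs); // back off before retrying
        }
        throw new RuntimeException(String.format(
            "Attempt to acquire primary node for write on bucket %s timed out in %s ms",
            bucketName, timeoutMs));
      }
    }
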
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionDataStore.java b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionDataStore.java
index f2851dc..4ef5128 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionDataStore.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionDataStore.java
@@ -1774,7 +1774,7 @@ public class PartitionedRegionDataStore implements HasCachePerfStats {
             this.partitionedRegion);
       }
       ForceReattemptException fre = new BucketNotFoundException(
-          String.format("Bucket id  %s  not found on VM  %s",
+          String.format("Bucket id %s not found on VM %s",
 
               new Object[] {this.partitionedRegion.bucketStringForLogs(bucketId.intValue()),
                   this.partitionedRegion.getMyId()}));
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionHelper.java b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionHelper.java
index eb0fcb6..5282e96 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionHelper.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionHelper.java
@@ -250,7 +250,7 @@ public class PartitionedRegionHelper {
             if (newConf != oldConf && !newConf.isGreaterNodeListVersion(oldConf)) {
               throw new CacheWriterException(
                   String.format(
-                      "New PartitionedRegionConfig  %s  does not have newer version than previous  %s",
+                      "New PartitionedRegionConfig %s does not have newer version than previous %s",
                       new Object[] {newConf, oldConf}));
             }
           }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/PlaceHolderDiskRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/PlaceHolderDiskRegion.java
index 06588f8..b2f5d29 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/PlaceHolderDiskRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/PlaceHolderDiskRegion.java
@@ -104,7 +104,7 @@ public class PlaceHolderDiskRegion extends AbstractDiskRegion
     RegionEntry re = getRecoveredEntryMap().initRecoveredEntry(key, value);
     if (re == null) {
       throw new InternalGemFireError(
-          String.format("Entry already existed:  %s", key));
+          String.format("Entry already existed: %s", key));
     }
     return (DiskEntry) re;
   }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/ProxyRegionMap.java b/geode-core/src/main/java/org/apache/geode/internal/cache/ProxyRegionMap.java
index 67a5b5e..29c175e 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/ProxyRegionMap.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/ProxyRegionMap.java
@@ -388,28 +388,28 @@ class ProxyRegionMap implements RegionMap {
     @Override
     public long getLastModified() {
       throw new UnsupportedOperationException(
-          String.format("No entry support on regions with DataPolicy  %s",
+          String.format("No entry support on regions with DataPolicy %s",
               DataPolicy.EMPTY));
     }
 
     @Override
     public long getLastAccessed() throws InternalStatisticsDisabledException {
       throw new UnsupportedOperationException(
-          String.format("No entry support on regions with DataPolicy  %s",
+          String.format("No entry support on regions with DataPolicy %s",
               DataPolicy.EMPTY));
     }
 
     @Override
     public long getHitCount() throws InternalStatisticsDisabledException {
       throw new UnsupportedOperationException(
-          String.format("No entry support on regions with DataPolicy  %s",
+          String.format("No entry support on regions with DataPolicy %s",
               DataPolicy.EMPTY));
     }
 
     @Override
     public long getMissCount() throws InternalStatisticsDisabledException {
       throw new UnsupportedOperationException(
-          String.format("No entry support on regions with DataPolicy  %s",
+          String.format("No entry support on regions with DataPolicy %s",
               DataPolicy.EMPTY));
     }
 
@@ -456,42 +456,42 @@ class ProxyRegionMap implements RegionMap {
     @Override
     public void txDidDestroy(long currentTime) {
       throw new UnsupportedOperationException(
-          String.format("No entry support on regions with DataPolicy  %s",
+          String.format("No entry support on regions with DataPolicy %s",
               DataPolicy.EMPTY));
     }
 
     @Override
     public void resetCounts() throws InternalStatisticsDisabledException {
       throw new UnsupportedOperationException(
-          String.format("No entry support on regions with DataPolicy  %s",
+          String.format("No entry support on regions with DataPolicy %s",
               DataPolicy.EMPTY));
     }
 
     @Override
     public void removePhase1(InternalRegion region, boolean clear) {
       throw new UnsupportedOperationException(
-          String.format("No entry support on regions with DataPolicy  %s",
+          String.format("No entry support on regions with DataPolicy %s",
               DataPolicy.EMPTY));
     }
 
     @Override
     public void removePhase2() {
       throw new UnsupportedOperationException(
-          String.format("No entry support on regions with DataPolicy  %s",
+          String.format("No entry support on regions with DataPolicy %s",
               DataPolicy.EMPTY));
     }
 
     @Override
     public boolean isRemoved() {
       throw new UnsupportedOperationException(
-          String.format("No entry support on regions with DataPolicy  %s",
+          String.format("No entry support on regions with DataPolicy %s",
               DataPolicy.EMPTY));
     }
 
     @Override
     public boolean isRemovedPhase2() {
       throw new UnsupportedOperationException(
-          String.format("No entry support on regions with DataPolicy  %s",
+          String.format("No entry support on regions with DataPolicy %s",
               DataPolicy.EMPTY));
     }
 
@@ -499,7 +499,7 @@ class ProxyRegionMap implements RegionMap {
     public boolean fillInValue(InternalRegion region, Entry entry, ByteArrayDataInput in,
         DistributionManager distributionManager, final Version version) {
       throw new UnsupportedOperationException(
-          String.format("No entry support on regions with DataPolicy  %s",
+          String.format("No entry support on regions with DataPolicy %s",
               DataPolicy.EMPTY));
     }
 
@@ -507,35 +507,35 @@ class ProxyRegionMap implements RegionMap {
     public boolean isOverflowedToDisk(InternalRegion region,
         DistributedRegion.DiskPosition diskPosition) {
       throw new UnsupportedOperationException(
-          String.format("No entry support on regions with DataPolicy  %s",
+          String.format("No entry support on regions with DataPolicy %s",
               DataPolicy.EMPTY));
     }
 
     @Override
     public Object getKey() {
       throw new UnsupportedOperationException(
-          String.format("No entry support on regions with DataPolicy  %s",
+          String.format("No entry support on regions with DataPolicy %s",
               DataPolicy.EMPTY));
     }
 
     @Override
     public Object getValue(RegionEntryContext context) {
       throw new UnsupportedOperationException(
-          String.format("No entry support on regions with DataPolicy  %s",
+          String.format("No entry support on regions with DataPolicy %s",
               DataPolicy.EMPTY));
     }
 
     @Override
     public Object getValueRetain(RegionEntryContext context) {
       throw new UnsupportedOperationException(
-          String.format("No entry support on regions with DataPolicy  %s",
+          String.format("No entry support on regions with DataPolicy %s",
               DataPolicy.EMPTY));
     }
 
     @Override
     public void setValue(RegionEntryContext context, Object value) {
       throw new UnsupportedOperationException(
-          String.format("No entry support on regions with DataPolicy  %s",
+          String.format("No entry support on regions with DataPolicy %s",
               DataPolicy.EMPTY));
     }
 
@@ -548,28 +548,28 @@ class ProxyRegionMap implements RegionMap {
     @Override
     public Object getValue() {
       throw new UnsupportedOperationException(
-          String.format("No entry support on regions with DataPolicy  %s",
+          String.format("No entry support on regions with DataPolicy %s",
               DataPolicy.EMPTY));
     }
 
     @Override
     public Token getValueAsToken() {
       throw new UnsupportedOperationException(
-          String.format("No entry support on regions with DataPolicy  %s",
+          String.format("No entry support on regions with DataPolicy %s",
               DataPolicy.EMPTY));
     }
 
     @Override
     public Object getValueRetain(RegionEntryContext context, boolean decompress) {
       throw new UnsupportedOperationException(
-          String.format("No entry support on regions with DataPolicy  %s",
+          String.format("No entry support on regions with DataPolicy %s",
               DataPolicy.EMPTY));
     }
 
     @Override
     public Object getTransformedValue() {
       throw new UnsupportedOperationException(
-          String.format("No entry support on regions with DataPolicy  %s",
+          String.format("No entry support on regions with DataPolicy %s",
               DataPolicy.EMPTY));
     }
 
@@ -581,21 +581,21 @@ class ProxyRegionMap implements RegionMap {
     @Override
     public Object getValueOnDisk(InternalRegion region) throws EntryNotFoundException {
       throw new UnsupportedOperationException(
-          String.format("No entry support on regions with DataPolicy  %s",
+          String.format("No entry support on regions with DataPolicy %s",
               DataPolicy.EMPTY));
     }
 
     @Override
     public Object getValueOnDiskOrBuffer(InternalRegion region) throws EntryNotFoundException {
       throw new UnsupportedOperationException(
-          String.format("No entry support on regions with DataPolicy  %s",
+          String.format("No entry support on regions with DataPolicy %s",
               DataPolicy.EMPTY));
     }
 
     @Override
     public Object getSerializedValueOnDisk(InternalRegion region) {
       throw new UnsupportedOperationException(
-          String.format("No entry support on regions with DataPolicy  %s",
+          String.format("No entry support on regions with DataPolicy %s",
               DataPolicy.EMPTY));
     }
 
@@ -603,7 +603,7 @@ class ProxyRegionMap implements RegionMap {
     public boolean initialImagePut(InternalRegion region, long lastModified, Object newValue,
         boolean wasRecovered, boolean acceptedVersionTag) {
       throw new UnsupportedOperationException(
-          String.format("No entry support on regions with DataPolicy  %s",
+          String.format("No entry support on regions with DataPolicy %s",
               DataPolicy.EMPTY));
     }
 
@@ -611,7 +611,7 @@ class ProxyRegionMap implements RegionMap {
     public boolean initialImageInit(InternalRegion region, long lastModified, Object newValue,
         boolean create, boolean wasRecovered, boolean acceptedVersionTag) {
       throw new UnsupportedOperationException(
-          String.format("No entry support on regions with DataPolicy  %s",
+          String.format("No entry support on regions with DataPolicy %s",
               DataPolicy.EMPTY));
     }
 
@@ -621,21 +621,21 @@ class ProxyRegionMap implements RegionMap {
         boolean removeRecoveredEntry)
         throws CacheWriterException, EntryNotFoundException, TimeoutException {
       throw new UnsupportedOperationException(
-          String.format("No entry support on regions with DataPolicy  %s",
+          String.format("No entry support on regions with DataPolicy %s",
               DataPolicy.EMPTY));
     }
 
     @Override
     public boolean getValueWasResultOfSearch() {
       throw new UnsupportedOperationException(
-          String.format("No entry support on regions with DataPolicy  %s",
+          String.format("No entry support on regions with DataPolicy %s",
               DataPolicy.EMPTY));
     }
 
     @Override
     public void setValueResultOfSearch(boolean value) {
       throw new UnsupportedOperationException(
-          String.format("No entry support on regions with DataPolicy  %s",
+          String.format("No entry support on regions with DataPolicy %s",
               DataPolicy.EMPTY));
     }
 
@@ -654,77 +654,77 @@ class ProxyRegionMap implements RegionMap {
     @Override
     public Object getValueInVMOrDiskWithoutFaultIn(InternalRegion region) {
       throw new UnsupportedOperationException(
-          String.format("No entry support on regions with DataPolicy  %s",
+          String.format("No entry support on regions with DataPolicy %s",
               DataPolicy.EMPTY));
     }
 
     @Override
     public Object getValueOffHeapOrDiskWithoutFaultIn(InternalRegion region) {
       throw new UnsupportedOperationException(
-          String.format("No entry support on regions with DataPolicy  %s",
+          String.format("No entry support on regions with DataPolicy %s",
               DataPolicy.EMPTY));
     }
 
     @Override
     public boolean isUpdateInProgress() {
       throw new UnsupportedOperationException(
-          String.format("No entry support on regions with DataPolicy  %s",
+          String.format("No entry support on regions with DataPolicy %s",
               DataPolicy.EMPTY));
     }
 
     @Override
     public void setUpdateInProgress(boolean underUpdate) {
       throw new UnsupportedOperationException(
-          String.format("No entry support on regions with DataPolicy  %s",
+          String.format("No entry support on regions with DataPolicy %s",
               DataPolicy.EMPTY));
     }
 
     @Override
     public boolean isValueNull() {
       throw new UnsupportedOperationException(
-          String.format("No entry support on regions with DataPolicy  %s",
+          String.format("No entry support on regions with DataPolicy %s",
               DataPolicy.EMPTY));
     }
 
     @Override
     public boolean isInvalid() {
       throw new UnsupportedOperationException(
-          String.format("No entry support on regions with DataPolicy  %s",
+          String.format("No entry support on regions with DataPolicy %s",
               DataPolicy.EMPTY));
     }
 
     @Override
     public boolean isDestroyed() {
       throw new UnsupportedOperationException(
-          String.format("No entry support on regions with DataPolicy  %s",
+          String.format("No entry support on regions with DataPolicy %s",
               DataPolicy.EMPTY));
     }
 
     @Override
     public void setValueToNull() {
       throw new UnsupportedOperationException(
-          String.format("No entry support on regions with DataPolicy  %s",
+          String.format("No entry support on regions with DataPolicy %s",
               DataPolicy.EMPTY));
     }
 
     @Override
     public boolean isInvalidOrRemoved() {
       throw new UnsupportedOperationException(
-          String.format("No entry support on regions with DataPolicy  %s",
+          String.format("No entry support on regions with DataPolicy %s",
               DataPolicy.EMPTY));
     }
 
     @Override
     public boolean isDestroyedOrRemoved() {
       throw new UnsupportedOperationException(
-          String.format("No entry support on regions with DataPolicy  %s",
+          String.format("No entry support on regions with DataPolicy %s",
               DataPolicy.EMPTY));
     }
 
     @Override
     public boolean isDestroyedOrRemovedButNotTombstone() {
       throw new UnsupportedOperationException(
-          String.format("No entry support on regions with DataPolicy  %s",
+          String.format("No entry support on regions with DataPolicy %s",
               DataPolicy.EMPTY));
     }
 
@@ -737,7 +737,7 @@ class ProxyRegionMap implements RegionMap {
     public void setValueWithTombstoneCheck(Object value, EntryEvent event)
         throws RegionClearedException {
       throw new UnsupportedOperationException(
-          String.format("No entry support on regions with DataPolicy  %s",
+          String.format("No entry support on regions with DataPolicy %s",
               DataPolicy.EMPTY));
     }
 
@@ -755,7 +755,7 @@ class ProxyRegionMap implements RegionMap {
     public void setValue(RegionEntryContext context, Object value, EntryEventImpl event)
         throws RegionClearedException {
       throw new UnsupportedOperationException(
-          String.format("No entry support on regions with DataPolicy  %s",
+          String.format("No entry support on regions with DataPolicy %s",
               DataPolicy.EMPTY));
     }
 
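(Editor's illustration, not part of the commit.) Every ProxyRegionMap hunk above makes the same one-character change: the double space before the %s placeholder is collapsed to a single space, so the rendered exception message no longer carries a stray extra space. A minimal, self-contained sketch of the difference, with a plain string standing in for Geode's DataPolicy.EMPTY:

    public class FormatSpacingDemo {
      public static void main(String[] args) {
        // Old format string: two spaces end up between "DataPolicy" and the substituted value.
        String before = String.format("No entry support on regions with DataPolicy  %s", "EMPTY");
        // Corrected format string: a single space, as in the + lines above.
        String after = String.format("No entry support on regions with DataPolicy %s", "EMPTY");
        System.out.println(before); // No entry support on regions with DataPolicy  EMPTY
        System.out.println(after);  // No entry support on regions with DataPolicy EMPTY
      }
    }
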
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/QueuedOperation.java b/geode-core/src/main/java/org/apache/geode/internal/cache/QueuedOperation.java
index 89fc060..981d012 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/QueuedOperation.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/QueuedOperation.java
@@ -75,7 +75,7 @@ public class QueuedOperation {
         lr.cmnClearRegion(re, false/* cacheWrite */, false/* useRVV */);
       } else {
         throw new IllegalStateException(
-            String.format("The  %s  should not have been queued.",
+            String.format("The %s should not have been queued.",
                 this.op));
       }
     } else {
@@ -126,7 +126,7 @@ public class QueuedOperation {
           }
         } else {
           throw new IllegalStateException(
-              String.format("The  %s  should not have been queued.",
+              String.format("The %s should not have been queued.",
                   this.op));
         }
       } finally {
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/SearchLoadAndWriteProcessor.java b/geode-core/src/main/java/org/apache/geode/internal/cache/SearchLoadAndWriteProcessor.java
index 76de0e7..3857fe0 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/SearchLoadAndWriteProcessor.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/SearchLoadAndWriteProcessor.java
@@ -625,7 +625,7 @@ public class SearchLoadAndWriteProcessor implements MembershipListener {
             locked = this.lock.tryLock(region.getCache().getLockTimeout(), TimeUnit.SECONDS);
             if (!locked) {
               throw new TimeoutException(
-                  String.format("Timed out locking  %s  before load",
+                  String.format("Timed out locking %s before load",
                       key));
             }
             break;
@@ -744,7 +744,7 @@ public class SearchLoadAndWriteProcessor implements MembershipListener {
               cause = this.remoteException;
             }
             throw new CacheLoaderException(
-                String.format("While invoking a remote netLoad:  %s",
+                String.format("While invoking a remote netLoad: %s",
                     cause),
                 cause);
           }
@@ -947,7 +947,7 @@ public class SearchLoadAndWriteProcessor implements MembershipListener {
           cause = this.remoteException;
         }
         throw new CacheWriterException(
-            String.format("While invoking a remote netWrite:  %s",
+            String.format("While invoking a remote netWrite: %s",
                 cause),
             cause);
       }
@@ -1217,7 +1217,7 @@ public class SearchLoadAndWriteProcessor implements MembershipListener {
           if (waitTimeMs <= 0) {
             throw new TimeoutException(
                 String.format(
-                    "Timed out while doing netsearch/netload/netwrite processorId= %s  Key is  %s",
+                    "Timed out while doing netsearch/netload/netwrite processorId= %s Key is %s",
                     new Object[] {this.processorId, this.key}));
           }
 
@@ -1255,7 +1255,7 @@ public class SearchLoadAndWriteProcessor implements MembershipListener {
                 sb.append(" lastNotifySpot=").append(lastNS);
               }
               throw new TimeoutException(
-                  String.format("Timeout during netsearch/netload/netwrite. Details:  %s",
+                  String.format("Timeout during netsearch/netload/netwrite. Details: %s",
                       sb));
             }
             return;
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/StateFlushOperation.java b/geode-core/src/main/java/org/apache/geode/internal/cache/StateFlushOperation.java
index 227c97a..6438462 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/StateFlushOperation.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/StateFlushOperation.java
@@ -357,7 +357,7 @@ public class StateFlushOperation {
           // cache is closed - no distribution advisor available for the region so nothing to do but
           // send the stabilization message
         } catch (Exception e) {
-          logger.fatal(String.format("%s  Exception caught while determining channel state",
+          logger.fatal(String.format("%s Exception caught while determining channel state",
               this),
               e);
         } finally {
@@ -421,7 +421,7 @@ public class StateFlushOperation {
           // cache is closed - no distribution advisor available for the region so nothing to do but
           // send the stabilization message
         } catch (Exception e) {
-          logger.fatal(String.format("%s  Exception caught while determining channel state",
+          logger.fatal(String.format("%s Exception caught while determining channel state",
               this),
               e);
         } finally {
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/TXCommitMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/TXCommitMessage.java
index 4e0b896..a02f9ed 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/TXCommitMessage.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/TXCommitMessage.java
@@ -508,7 +508,7 @@ public class TXCommitMessage extends PooledDistributionMessage
     if (!regionDistributionExceptions.isEmpty()) {
       throw new CommitDistributionException(
           String.format(
-              "These regions experienced reliability failure during distribution of the operation:  %s",
+              "These regions experienced reliability failure during distribution of the operation: %s",
               failedRegionNames),
           regionDistributionExceptions);
     }
@@ -752,7 +752,7 @@ public class TXCommitMessage extends PooledDistributionMessage
       CommitReplyException replyEx = null;
       if (!this.processingExceptions.isEmpty()) {
         replyEx = new CommitReplyException(
-            String.format("Commit operation generated one or more exceptions from  %s",
+            String.format("Commit operation generated one or more exceptions from %s",
                 this.getSender()),
             this.processingExceptions);
       }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/TXEntryState.java b/geode-core/src/main/java/org/apache/geode/internal/cache/TXEntryState.java
index c2934a2..ae5085e 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/TXEntryState.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/TXEntryState.java
@@ -648,7 +648,7 @@ public class TXEntryState implements Releasable {
         return Operation.NET_LOAD_CREATE;
       default:
         throw new IllegalStateException(
-            String.format("<unhandled op  %s  >", Byte.valueOf(this.op)));
+            String.format("<unhandled op %s >", Byte.valueOf(this.op)));
     }
   }
 
@@ -887,7 +887,7 @@ public class TXEntryState implements Releasable {
         return Operation.NET_LOAD_UPDATE;
       default:
         throw new IllegalStateException(
-            String.format("<unhandled op  %s  >", Byte.valueOf(this.op)));
+            String.format("<unhandled op %s >", Byte.valueOf(this.op)));
     }
   }
 
@@ -1084,7 +1084,7 @@ public class TXEntryState implements Releasable {
           case OP_D_INVALIDATE_LD:
           case OP_D_DESTROY:
             throw new IllegalStateException(
-                String.format("Unexpected current op  %s  for requested op  %s",
+                String.format("Unexpected current op %s for requested op %s",
                     new Object[] {opToString(), opToString(requestedOpCode)}));
           case OP_L_INVALIDATE:
             advisedOpCode = requestedOpCode;
@@ -1139,7 +1139,7 @@ public class TXEntryState implements Releasable {
             break;
           default:
             throw new IllegalStateException(
-                String.format("Unhandled  %s", opToString()));
+                String.format("Unhandled %s", opToString()));
         }
         break;
       case OP_D_DESTROY:
@@ -1161,7 +1161,7 @@ public class TXEntryState implements Releasable {
           case OP_D_DESTROY:
           case OP_D_INVALIDATE_LD:
             throw new IllegalStateException(
-                String.format("Unexpected current op  %s  for requested op  %s",
+                String.format("Unexpected current op %s for requested op %s",
                     new Object[] {opToString(), opToString(requestedOpCode)}));
           case OP_L_INVALIDATE:
             advisedOpCode = requestedOpCode;
@@ -1211,7 +1211,7 @@ public class TXEntryState implements Releasable {
             break;
           default:
             throw new IllegalStateException(
-                String.format("Unhandled  %s", opToString()));
+                String.format("Unhandled %s", opToString()));
         }
         break;
       case OP_D_INVALIDATE:
@@ -1229,7 +1229,7 @@ public class TXEntryState implements Releasable {
           case OP_D_INVALIDATE_LD:
           case OP_D_DESTROY:
             throw new IllegalStateException(
-                String.format("Unexpected current op  %s  for requested op  %s",
+                String.format("Unexpected current op %s for requested op %s",
                     new Object[] {opToString(), opToString(requestedOpCode)}));
           case OP_D_INVALIDATE:
           case OP_L_INVALIDATE:
@@ -1275,7 +1275,7 @@ public class TXEntryState implements Releasable {
             break;
           default:
             throw new IllegalStateException(
-                String.format("Unhandled  %s", opToString()));
+                String.format("Unhandled %s", opToString()));
         }
         break;
       case OP_CREATE:
@@ -1346,7 +1346,7 @@ public class TXEntryState implements Releasable {
             // Note that OP_D_INVALIDATE followed by OP_SEARCH_PUT is not
             // possible since the netsearch will alwsys "miss" in this case.
             throw new IllegalStateException(
-                String.format("Previous op  %s  unexpected for requested op  %s",
+                String.format("Previous op %s unexpected for requested op %s",
                     new Object[] {opToString(), opToString(requestedOpCode)}));
         }
         break;
@@ -1377,13 +1377,13 @@ public class TXEntryState implements Releasable {
             // case because they should have caused a OP_SEARCH_PUT
             // to be requested.
             throw new IllegalStateException(
-                String.format("Previous op  %s  unexpected for requested op  %s",
+                String.format("Previous op %s unexpected for requested op %s",
                     new Object[] {opToString(), opToString(requestedOpCode)}));
         }
         break;
       default:
         throw new IllegalStateException(
-            String.format("OpCode  %s  should never be requested",
+            String.format("OpCode %s should never be requested",
                 opToString(requestedOpCode)));
     }
     return advisedOpCode;
@@ -1446,13 +1446,13 @@ public class TXEntryState implements Releasable {
             if (!fromString.equals(toString)) {
               throw new CommitConflictException(
                   String.format(
-                      "Entry for key  %s  on region  %s  had already been changed from  %s  to  %s",
+                      "Entry for key %s on region %s had already been changed from %s to %s",
 
                       new Object[] {key, r.getDisplayName(), fromString, toString}));
             }
           }
           throw new CommitConflictException(
-              String.format("Entry for key  %s  on region  %s  had a state change",
+              String.format("Entry for key %s on region %s had a state change",
                   new Object[] {key, r.getDisplayName()}));
         }
       } finally {
@@ -1799,7 +1799,7 @@ public class TXEntryState implements Releasable {
         break;
       default:
         throw new IllegalStateException(
-            String.format("<unhandled op  %s  >", opToString()));
+            String.format("<unhandled op %s >", opToString()));
     }
   }
 
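(Editor's illustration, not part of the commit.) Several TXEntryState messages above pass their arguments as new Object[] {...}. Because String.format takes Object... varargs, an explicit array and a plain argument list are interchangeable; the sketch below, using made-up key and region values, shows both forms producing the same text:

    public class VarargsFormatDemo {
      public static void main(String[] args) {
        Object[] asArray = new Object[] {"someKey", "/exampleRegion"};
        // Passing an Object[] supplies the whole varargs array at once.
        String viaArray = String.format("Entry for key %s on region %s had a state change", asArray);
        // Listing the arguments individually is equivalent.
        String viaVarargs =
            String.format("Entry for key %s on region %s had a state change", "someKey", "/exampleRegion");
        System.out.println(viaArray);   // Entry for key someKey on region /exampleRegion had a state change
        System.out.println(viaVarargs); // same output
      }
    }
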
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/TXEntryUserAttrState.java b/geode-core/src/main/java/org/apache/geode/internal/cache/TXEntryUserAttrState.java
index 84b8e22..24de3ac 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/TXEntryUserAttrState.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/TXEntryUserAttrState.java
@@ -54,7 +54,7 @@ public class TXEntryUserAttrState {
     if (this.originalValue != curCmtValue) {
       throw new CommitConflictException(
           String.format(
-              "Entry user attribute for key  %s  on region  %s  had already been changed to  %s",
+              "Entry user attribute for key %s on region %s had already been changed to %s",
               new Object[] {key, r.getFullPath(), curCmtValue}));
     }
   }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/TXManagerImpl.java b/geode-core/src/main/java/org/apache/geode/internal/cache/TXManagerImpl.java
index 6af4a31..64ef61e 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/TXManagerImpl.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/TXManagerImpl.java
@@ -321,7 +321,7 @@ public class TXManagerImpl implements CacheTransactionManager, MembershipListene
       TransactionId tid = getTransactionId();
       if (tid != null) {
         throw new java.lang.IllegalStateException(
-            String.format("Transaction  %s  already in progress",
+            String.format("Transaction %s already in progress",
                 tid));
       }
     }
@@ -770,7 +770,7 @@ public class TXManagerImpl implements CacheTransactionManager, MembershipListene
       TransactionId tid = getTransactionId();
       if (tid != null) {
         throw new java.lang.IllegalStateException(
-            String.format("Transaction  %s  already in progress",
+            String.format("Transaction %s already in progress",
                 tid));
       }
       if (needToResumeBySameThread) {
@@ -1304,7 +1304,7 @@ public class TXManagerImpl implements CacheTransactionManager, MembershipListene
   public RuntimeException getExceptionForToken(TXCommitMessage msg, TXId txId) {
     if (msg == TXCommitMessage.CMT_CONFLICT_MSG) {
       return new CommitConflictException(
-          String.format("Conflict detected in GemFire transaction  %s",
+          String.format("Conflict detected in GemFire transaction %s",
               txId));
     }
     if (msg == TXCommitMessage.REBALANCE_MSG) {
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/TXMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/TXMessage.java
index 44a9853..04f7f1d 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/TXMessage.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/TXMessage.java
@@ -75,11 +75,11 @@ public abstract class TXMessage extends SerialDistributionMessage
       InternalCache cache = dm.getCache();
       if (checkCacheClosing(cache) || checkDSClosing(cache.getInternalDistributedSystem())) {
         if (cache == null) {
-          thr = new CacheClosedException(String.format("Remote cache is closed:  %s",
+          thr = new CacheClosedException(String.format("Remote cache is closed: %s",
               dm.getId()));
         } else {
           thr = cache
-              .getCacheClosedException(String.format("Remote cache is closed:  %s",
+              .getCacheClosedException(String.format("Remote cache is closed: %s",
                   dm.getId()));
         }
         return;
@@ -102,7 +102,7 @@ public abstract class TXMessage extends SerialDistributionMessage
         logger.debug("shutdown caught, abandoning message: " + se);
       }
     } catch (RegionDestroyedException rde) {
-      thr = new ForceReattemptException(String.format("Region is destroyed in  %s",
+      thr = new ForceReattemptException(String.format("Region is destroyed in %s",
           dm.getDistributionManagerId()), rde);
     } catch (VirtualMachineError err) {
       SystemFailure.initiateFailure(err);
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/TXReservationMgr.java b/geode-core/src/main/java/org/apache/geode/internal/cache/TXReservationMgr.java
index 558adb4..25fda38 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/TXReservationMgr.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/TXReservationMgr.java
@@ -103,7 +103,7 @@ public class TXReservationMgr {
         release(localLocks, true);
         throw new CommitConflictException(
             String.format(
-                "The key  %s  in region  %s  was being modified by another transaction locally.",
+                "The key %s in region %s was being modified by another transaction locally.",
                 new Object[] {keys[i], rr.getRegionFullPath()}));
       }
     }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/TXState.java b/geode-core/src/main/java/org/apache/geode/internal/cache/TXState.java
index e322beb..e5e09e0 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/TXState.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/TXState.java
@@ -1089,7 +1089,7 @@ public class TXState implements TXStateInterface {
       cleanup();
       proxy.getTxMgr().noteCommitFailure(opStart, this.jtaLifeTime, this);
       throw new SynchronizationCommitConflictException(
-          String.format("Conflict detected in GemFire transaction  %s",
+          String.format("Conflict detected in GemFire transaction %s",
               getTransactionId()),
           commitConflict);
     }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/UpdateOperation.java b/geode-core/src/main/java/org/apache/geode/internal/cache/UpdateOperation.java
index 0405787..bac3372 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/UpdateOperation.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/UpdateOperation.java
@@ -286,7 +286,7 @@ public class UpdateOperation extends AbstractUpdateOperation {
           break;
         default:
           throw new InternalGemFireError(
-              String.format("unknown deserialization policy:  %s",
+              String.format("unknown deserialization policy: %s",
                   Byte.valueOf(deserializationPolicy)));
       }
     }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/VMLRURegionMap.java b/geode-core/src/main/java/org/apache/geode/internal/cache/VMLRURegionMap.java
index f47a324..b473215 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/VMLRURegionMap.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/VMLRURegionMap.java
@@ -300,7 +300,7 @@ public class VMLRURegionMap extends AbstractRegionMap {
 
     } else {
       throw new InternalGemFireException(
-          String.format("Unknown eviction action:  %s", action));
+          String.format("Unknown eviction action: %s", action));
     }
   }
 
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/ValidatingDiskRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/ValidatingDiskRegion.java
index 9dc070d..6d02593 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/ValidatingDiskRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/ValidatingDiskRegion.java
@@ -83,7 +83,7 @@ public class ValidatingDiskRegion extends DiskRegion implements DiskRecoveryStor
     ValidatingDiskEntry de = new ValidatingDiskEntry(key, re);
     if (this.map.putIfAbsent(key, de) != null) {
       throw new InternalGemFireError(
-          String.format("Entry already existed:  %s", key));
+          String.format("Entry already existed: %s", key));
     }
     return de;
   }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/eviction/MemoryLRUController.java b/geode-core/src/main/java/org/apache/geode/internal/cache/eviction/MemoryLRUController.java
index 0f048d9..5574100 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/eviction/MemoryLRUController.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/eviction/MemoryLRUController.java
@@ -70,7 +70,7 @@ public class MemoryLRUController extends SizeLRUController {
   private void setMaximumMegabytes(int megabytes) {
     if (megabytes <= 0) {
       throw new IllegalArgumentException(
-          String.format("MemLRUController limit must be postive:  %s",
+          String.format("MemLRUController limit must be postive: %s",
               megabytes));
     }
     this.limit = megabytes * ONE_MEG;
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/execute/FunctionStreamingResultCollector.java b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/FunctionStreamingResultCollector.java
index ac3788a..b9db8bf 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/execute/FunctionStreamingResultCollector.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/FunctionStreamingResultCollector.java
@@ -381,13 +381,13 @@ public class FunctionStreamingResultCollector extends ReplyProcessor21 implement
               || execution instanceof MultiRegionFunctionExecutor) {
             if (!this.fn.isHA()) {
               fe = new FunctionInvocationTargetException(
-                  String.format("MemberResponse got memberDeparted event for < %s > crashed,  %s",
+                  String.format("MemberResponse got memberDeparted event for < %s > crashed, %s",
                       new Object[] {id, Boolean.valueOf(crashed)}),
                   id);
             } else {
               fe = new InternalFunctionInvocationTargetException(
                   String.format(
-                      "DistributionResponse got memberDeparted event for < %s > crashed,  %s",
+                      "DistributionResponse got memberDeparted event for < %s > crashed, %s",
                       new Object[] {id, Boolean.valueOf(crashed)}),
                   id);
               if (execution.isClientServerMode()) {
@@ -406,7 +406,7 @@ public class FunctionStreamingResultCollector extends ReplyProcessor21 implement
             this.fites.add(fe);
           } else {
             fe = new FunctionInvocationTargetException(
-                String.format("MemberResponse got memberDeparted event for < %s > crashed,  %s",
+                String.format("MemberResponse got memberDeparted event for < %s > crashed, %s",
                     new Object[] {id, Boolean.valueOf(crashed)}),
                 id);
           }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/locks/TXLockServiceImpl.java b/geode-core/src/main/java/org/apache/geode/internal/cache/locks/TXLockServiceImpl.java
index dd5b312..b770968 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/locks/TXLockServiceImpl.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/locks/TXLockServiceImpl.java
@@ -165,7 +165,7 @@ public class TXLockServiceImpl extends TXLockService {
     synchronized (this.txLockIdList) {
       if (!this.txLockIdList.contains(txLockId)) {
         IllegalArgumentException e = new IllegalArgumentException(
-            String.format("Invalid txLockId not found:  %s",
+            String.format("Invalid txLockId not found: %s",
                 txLockId));
         system.getDistributionManager().getCancelCriterion().checkCancelInProgress(e);
         Cache cache = system.getCache();
@@ -225,7 +225,7 @@ public class TXLockServiceImpl extends TXLockService {
         // which will have a new empty list (txLockIdList) and it will not
         // contain the originally added txLockId
         throw new IllegalArgumentException(
-            String.format("Invalid txLockId not found:  %s",
+            String.format("Invalid txLockId not found: %s",
                 txLockId));
       }
 
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/locks/TXRecoverGrantorMessageProcessor.java b/geode-core/src/main/java/org/apache/geode/internal/cache/locks/TXRecoverGrantorMessageProcessor.java
index a15e4b7..faa6bde 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/locks/TXRecoverGrantorMessageProcessor.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/locks/TXRecoverGrantorMessageProcessor.java
@@ -93,7 +93,7 @@ public class TXRecoverGrantorMessageProcessor
       if (replyException == null) {
         replyException = new ReplyException(t);
       } else {
-        logger.warn(String.format("More than one exception thrown in  %s",
+        logger.warn(String.format("More than one exception thrown in %s",
             this),
             t);
       }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/BucketSizeMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/BucketSizeMessage.java
index 65ff1cf..f5b1f90 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/BucketSizeMessage.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/BucketSizeMessage.java
@@ -103,7 +103,7 @@ public class BucketSizeMessage extends PartitionMessage {
       size = ds.getBucketSize(bucketId);
     } else {
       // sender thought this member had a data store, but it doesn't
-      throw new ForceReattemptException(String.format("no datastore in  %s",
+      throw new ForceReattemptException(String.format("no datastore in %s",
           dm.getDistributionManagerId()));
     }
 
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/ContainsKeyValueMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/ContainsKeyValueMessage.java
index 765c4c6..0d2889e 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/ContainsKeyValueMessage.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/ContainsKeyValueMessage.java
@@ -128,7 +128,7 @@ public class ContainsKeyValueMessage extends PartitionMessageWithDirectReply {
       logger.fatal("Partitioned Region <> is not configured to store data",
           r.getFullPath());
       ForceReattemptException fre = new ForceReattemptException(
-          String.format("Partitioned Region  %s  on  %s  is not configured to store data",
+          String.format("Partitioned Region %s on %s is not configured to store data",
               new Object[] {r.getFullPath(), dm.getId()}));
       fre.setHash(key.hashCode());
       throw fre;
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/DestroyMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/DestroyMessage.java
index 925bacf..7055234 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/DestroyMessage.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/DestroyMessage.java
@@ -378,7 +378,7 @@ public class DestroyMessage extends PartitionMessageWithDirectReply {
   }
 
   /**
-   * create a new EntryEvent to be used in notifying listeners, bridge servers, etc. Caller must
+   * create a new EntryEvent to be used in notifying listeners, cache servers, etc. Caller must
    * release result if it is != to sourceEvent
    */
   @Retained
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/IndexCreationMsg.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/IndexCreationMsg.java
index aab8c21..ef8255c 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/IndexCreationMsg.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/IndexCreationMsg.java
@@ -276,7 +276,7 @@ public class IndexCreationMsg extends PartitionMessage {
       if (pr == null /* && failIfRegionMissing() */) {
         String msg =
             String.format(
-                "Could not get Partitioned Region from Id  %s  for message  %s  received on member= %s  map= %s",
+                "Could not get Partitioned Region from Id %s for message %s received on member= %s map= %s",
                 new Object[] {Integer.valueOf(this.regionId), this, dm.getId(),
                     PartitionedRegion.dumpPRId()});
         throw new PartitionedRegionException(msg, new RegionNotFoundException(msg));
@@ -312,7 +312,7 @@ public class IndexCreationMsg extends PartitionMessage {
           logger.info("Region is locally destroyed, throwing RegionDestroyedException for {}",
               pr);
           thr = new RegionDestroyedException(
-              String.format("Region is locally destroyed on  %s",
+              String.format("Region is locally destroyed on %s",
                   dm.getId()),
               pr.getFullPath());
         }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/InvalidateMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/InvalidateMessage.java
index 58095dd..e4f09c9 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/InvalidateMessage.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/InvalidateMessage.java
@@ -71,7 +71,7 @@ public class InvalidateMessage extends DestroyMessage {
   }
 
   /**
-   * added for sending old value over the wire to the bridge servers with Cqs
+   * added for sending old value over the wire to the cache servers with Cqs
    *
    * @param original invalidateMessage originated at remote vm.
    * @param event EntryEventImpl generated by operation on the bucket region.
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PRFunctionStreamingResultCollector.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PRFunctionStreamingResultCollector.java
index a3edb22..152c8dc 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PRFunctionStreamingResultCollector.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PRFunctionStreamingResultCollector.java
@@ -351,12 +351,12 @@ public class PRFunctionStreamingResultCollector extends FunctionStreamingResultC
         if (removeMember(id, true)) {
           if (!this.fn.isHA()) {
             fite = new FunctionInvocationTargetException(
-                String.format("memberDeparted event for < %s > crashed,  %s",
+                String.format("memberDeparted event for < %s > crashed, %s",
                     new Object[] {id, Boolean.valueOf(crashed)}),
                 id);
           } else {
             fite = new InternalFunctionInvocationTargetException(
-                String.format("memberDeparted event for < %s > crashed,  %s",
+                String.format("memberDeparted event for < %s > crashed, %s",
                     new Object[] {id, Boolean.valueOf(crashed)}),
                 id);
             this.execution.addFailedNode(id.getId());
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PartitionMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PartitionMessage.java
index 5fae569..739ebf2 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PartitionMessage.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PartitionMessage.java
@@ -296,10 +296,10 @@ public abstract class PartitionMessage extends DistributionMessage
       if (checkCacheClosing(dm) || checkDSClosing(dm)) {
         if (cache != null) {
           thr = cache
-              .getCacheClosedException(String.format("Remote cache is closed:  %s",
+              .getCacheClosedException(String.format("Remote cache is closed: %s",
                   dm.getId()));
         } else {
-          thr = new CacheClosedException(String.format("Remote cache is closed:  %s",
+          thr = new CacheClosedException(String.format("Remote cache is closed: %s",
               dm.getId()));
         }
         return;
@@ -356,7 +356,7 @@ public abstract class PartitionMessage extends DistributionMessage
       // destroyed, so we must send back an exception. If the sender's
       // region is also destroyed, who cares if we send it an exception
       // if (pr != null && pr.isClosed) {
-      thr = new ForceReattemptException(String.format("Region is destroyed in  %s",
+      thr = new ForceReattemptException(String.format("Region is destroyed in %s",
           dm.getDistributionManagerId()), rde);
       // }
     } catch (VirtualMachineError err) {
@@ -763,7 +763,7 @@ public abstract class PartitionMessage extends DistributionMessage
       if (id != null) {
         if (removeMember(id, true)) {
           this.prce = new ForceReattemptException(
-              String.format("memberDeparted event for < %s > crashed,  %s",
+              String.format("memberDeparted event for < %s > crashed, %s",
                   id, crashed));
         }
         checkIfDone();
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PutMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PutMessage.java
index b02f3ac..331bf19 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PutMessage.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PutMessage.java
@@ -390,7 +390,7 @@ public class PutMessage extends PartitionMessageWithDirectReply implements NewVa
 
 
   /**
-   * create a new EntryEvent to be used in notifying listeners, bridge servers, etc. Caller must
+   * create a new EntryEvent to be used in notifying listeners, cache servers, etc. Caller must
    * release result if it is != to sourceEvent
    */
   @Retained
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/RemoveIndexesMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/RemoveIndexesMessage.java
index e6f25e3..68af9ae 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/RemoveIndexesMessage.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/RemoveIndexesMessage.java
@@ -252,7 +252,7 @@ public class RemoveIndexesMessage extends PartitionMessage {
       if (pr == null /* && failIfRegionMissing() */ ) {
         throw new PartitionedRegionException(
             String.format(
-                "Could not get Partitioned Region from Id  %s  for message  %s  received on member= %s  map= %s",
+                "Could not get Partitioned Region from Id %s for message %s received on member= %s map= %s",
                 new Object[] {Integer.valueOf(this.regionId), this, dm.getId(),
                     PartitionedRegion.dumpPRId()}));
       }
@@ -289,7 +289,7 @@ public class RemoveIndexesMessage extends PartitionMessage {
           logger.info("Region is locally destroyed, throwing RegionDestroyedException for {}",
               pr);
           thr = new RegionDestroyedException(
-              String.format("Region is locally destroyed on  %s",
+              String.format("Region is locally destroyed on %s",
                   dm.getId()),
               pr.getFullPath());
         }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/SizeMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/SizeMessage.java
index de1592f..3f28930 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/SizeMessage.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/SizeMessage.java
@@ -171,7 +171,7 @@ public class SizeMessage extends PartitionMessage {
       }
       ReplyMessage.send(getSender(), getProcessorId(),
           new ReplyException(new ForceReattemptException(
-              String.format("%s : could not find partitioned region with Id  %s",
+              String.format("%s : could not find partitioned region with Id %s",
                   dm.getDistributionManagerId(), Integer.valueOf(regionId)))),
           dm, r != null && r.isInternalRegion());
     }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/StreamingPartitionOperation.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/StreamingPartitionOperation.java
index a3fadb1..6c3cc48 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/StreamingPartitionOperation.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/StreamingPartitionOperation.java
@@ -206,7 +206,7 @@ public abstract class StreamingPartitionOperation extends StreamingOperation {
         this.failedMembers.add(id);
         this.memberDepartedMessage =
             String.format(
-                "Streaming reply processor got memberDeparted event for < %s > crashed,  %s",
+                "Streaming reply processor got memberDeparted event for < %s > crashed, %s",
                 new Object[] {id, Boolean.valueOf(crashed)});
       }
       super.memberDeparted(distributionManager, id, crashed);
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/persistence/DiskInitFileParser.java b/geode-core/src/main/java/org/apache/geode/internal/cache/persistence/DiskInitFileParser.java
index 2cd1f5f..5350a90 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/persistence/DiskInitFileParser.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/persistence/DiskInitFileParser.java
@@ -475,7 +475,7 @@ public class DiskInitFileParser {
           break;
         default:
           throw new DiskAccessException(
-              String.format("Unknown opCode  %s  found in disk initialization file.", opCode),
+              String.format("Unknown opCode %s found in disk initialization file.", opCode),
               this.interpreter.getNameForError());
       }
       if (interpreter.isClosing()) {
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/Acceptor.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/Acceptor.java
index ae6f199..7108c54 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/Acceptor.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/Acceptor.java
@@ -19,7 +19,7 @@ import java.io.IOException;
 import org.apache.geode.internal.cache.tier.sockets.CacheClientNotifier;
 
 /**
- * Defines the message listener/acceptor interface which is the GemFire Bridge Server. Multiple
+ * Defines the message listener/acceptor interface which is the GemFire cache server. Multiple
  * communication stacks may provide implementations for the interfaces defined in this package
  *
  * @since GemFire 2.0.2
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/AcceptorImpl.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/AcceptorImpl.java
index ade5a81..16c701a 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/AcceptorImpl.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/AcceptorImpl.java
@@ -97,7 +97,7 @@ import org.apache.geode.internal.tcp.ConnectionTable;
 import org.apache.geode.internal.util.ArrayUtils;
 
 /**
- * Implements the acceptor thread on the bridge server. Accepts connections from the edge and starts
+ * Implements the acceptor thread on the cache server. Accepts connections from the edge and starts
  * up threads to process requests from these.
  *
  * @since GemFire 2.0.2
@@ -151,7 +151,7 @@ public class AcceptorImpl implements Acceptor, Runnable, CommBufferPool {
   private final Object syncLock = new Object();
 
   /**
-   * THE selector for the bridge server; null if no selector.
+   * THE selector for the cache server; null if no selector.
    */
   private final Selector selector;
   // private final Selector tmpSel;
@@ -1373,7 +1373,7 @@ public class AcceptorImpl implements Acceptor, Runnable, CommBufferPool {
       return;
     }
 
-    logger.debug("Bridge server: Initializing {} communication socket: {}", communicationMode,
+    logger.debug("cache server: Initializing {} communication socket: {}", communicationMode,
         socket);
     boolean notForQueue = (communicationMode != ClientToServerForQueue);
     if (notForQueue) {
@@ -1683,7 +1683,7 @@ public class AcceptorImpl implements Acceptor, Runnable, CommBufferPool {
   }
 
   /**
-   * Gets the address that this bridge server can be contacted on from external processes.
+   * Gets the address that this cache server can be contacted on from external processes.
    *
    * @since GemFire 5.7
    */
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/BaseCommand.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/BaseCommand.java
index 52275b1..9d5e490 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/BaseCommand.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/BaseCommand.java
@@ -94,7 +94,7 @@ public abstract class BaseCommand implements Command {
       Boolean.getBoolean(DistributionConfig.GEMFIRE_PREFIX + "bridge.suppressIOExceptionLogging");
 
   /**
-   * Maximum number of concurrent incoming client message bytes that a bridge server will allow.
+   * Maximum number of concurrent incoming client message bytes that a cache server will allow.
    * Once a server is working on this number additional incoming client messages will wait until one
    * of them completes or fails. The bytes are computed based in the size sent in the incoming msg
    * header.
@@ -103,7 +103,7 @@ public abstract class BaseCommand implements Command {
       Integer.getInteger("BridgeServer.MAX_INCOMING_DATA", -1);
 
   /**
-   * Maximum number of concurrent incoming client messages that a bridge server will allow. Once a
+   * Maximum number of concurrent incoming client messages that a cache server will allow. Once a
    * server is working on this number additional incoming client messages will wait until one of
    * them completes or fails.
    */
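(Editor's illustration, not part of the commit.) The BaseCommand hunks above only reword the Javadoc, but the limits they describe are read from JVM system properties via Integer.getInteger, as the surrounding context shows. A small sketch of that JDK call; the property name is taken from the context above, everything else is hypothetical:

    public class IncomingDataLimitDemo {
      public static void main(String[] args) {
        // Integer.getInteger reads a JVM system property
        // (e.g. -DBridgeServer.MAX_INCOMING_DATA=1048576) and returns the
        // supplied default (-1 here) when the property is not set.
        int maxIncomingData = Integer.getInteger("BridgeServer.MAX_INCOMING_DATA", -1);
        System.out.println("MAX_INCOMING_DATA limit: " + maxIncomingData);
      }
    }
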
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/BaseCommandQuery.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/BaseCommandQuery.java
index cae6fdf..8adde73 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/BaseCommandQuery.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/BaseCommandQuery.java
@@ -230,7 +230,7 @@ public abstract class BaseCommandQuery extends BaseCommand {
           writeQueryResponseChunk(result, null, true, servConn);
         }
       } else {
-        throw new QueryInvalidException(String.format("Unknown result type:  %s",
+        throw new QueryInvalidException(String.format("Unknown result type: %s",
             result.getClass()));
       }
       msg.clearParts();
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/CacheClientNotifier.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/CacheClientNotifier.java
index 1218311..84f5715 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/CacheClientNotifier.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/CacheClientNotifier.java
@@ -389,7 +389,7 @@ public class CacheClientNotifier {
           new Object[] {this, e}),
           e);
       IOException io = new IOException(
-          String.format("Exception occurred while trying to register interest due to :  %s",
+          String.format("Exception occurred while trying to register interest due to : %s",
               e.getMessage()));
       io.initCause(e);
       throw io;
@@ -1097,7 +1097,7 @@ public class CacheClientNotifier {
       throws Exception {
     if (!supportsOperation(operation)) {
       throw new Exception(
-          String.format("The cache client notifier does not support operations of type  %s",
+          String.format("The cache client notifier does not support operations of type %s",
               operation));
     }
     // String regionName = event.getRegion().getFullPath();
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/CacheClientUpdater.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/CacheClientUpdater.java
index 1debc21..4d04017 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/CacheClientUpdater.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/CacheClientUpdater.java
@@ -1782,7 +1782,7 @@ public class CacheClientUpdater extends LoggingThread implements ClientUpdater,
 
     // If this was a surprise, print a warning.
     if (unexpected && !(exception instanceof CancelException)) {
-      logger.warn(String.format("%s :  %s : %s",
+      logger.warn(String.format("%s : %s : %s",
           new Object[] {this, message, exception}), exception);
     }
     // We can't shutdown the client updater just because of an exception.
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ChunkedMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ChunkedMessage.java
index c17c10e..3c075a9 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ChunkedMessage.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ChunkedMessage.java
@@ -201,7 +201,7 @@ public class ChunkedMessage extends Message {
         cb.clear();
         if (!MessageType.validate(type)) {
           throw new IOException(
-              String.format("Invalid message type  %s  while reading header",
+              String.format("Invalid message type %s while reading header",
                   Integer.valueOf(type)));
         }
 
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ClientHealthMonitor.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ClientHealthMonitor.java
index 1551cba..eff009b 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ClientHealthMonitor.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ClientHealthMonitor.java
@@ -87,7 +87,7 @@ public class ClientHealthMonitor {
   static ClientHealthMonitor _instance;
 
   /**
-   * Reference count in the event that multiple bridge servers are using the health monitor
+   * Reference count in the event that multiple cache servers are using the health monitor
    */
 
   private static int refCount = 0;
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/HAEventWrapper.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/HAEventWrapper.java
index a755efa..99859d3 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/HAEventWrapper.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/HAEventWrapper.java
@@ -73,7 +73,7 @@ public class HAEventWrapper implements Conflatable, DataSerializableFixedID, Siz
   private EventID eventIdentifier;
 
   /**
-   * The underlying map for all the ha region queues associated with a bridge server.
+   * The underlying map for all the ha region queues associated with a cache server.
    */
   private Map haContainer;
 
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/Message.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/Message.java
index 65a6725..4876de6 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/Message.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/Message.java
@@ -668,7 +668,7 @@ public class Message {
     cb.clear();
 
     if (!MessageType.validate(type)) {
-      throw new IOException(String.format("Invalid message type  %s  while reading header",
+      throw new IOException(String.format("Invalid message type %s while reading header",
           type));
     }
 
@@ -694,7 +694,7 @@ public class Message {
               }
               throw new IOException(
                   String.format(
-                      "Operation timed out on server waiting on concurrent message limiter after waiting  %s  milliseconds",
+                      "Operation timed out on server waiting on concurrent message limiter after waiting %s milliseconds",
                       timeToWait));
             }
           }
@@ -711,7 +711,7 @@ public class Message {
 
     if (len > 0) {
       if (this.maxIncomingMessageLength > 0 && len > this.maxIncomingMessageLength) {
-        throw new IOException(String.format("Message size  %s  exceeded max limit of  %s",
+        throw new IOException(String.format("Message size %s exceeded max limit of %s",
             new Object[] {len, this.maxIncomingMessageLength}));
       }
 
@@ -734,7 +734,7 @@ public class Message {
                   || !this.messageLimiter.tryAcquire(1, newTimeToWait, TimeUnit.MILLISECONDS)) {
                 throw new IOException(
                     String.format(
-                        "Operation timed out on server waiting on concurrent data limiter after waiting  %s  milliseconds",
+                        "Operation timed out on server waiting on concurrent data limiter after waiting %s milliseconds",
                         timeToWait));
               }
             }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ServerConnection.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ServerConnection.java
index d7a7de7..5c6b523 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ServerConnection.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ServerConnection.java
@@ -1661,7 +1661,7 @@ public abstract class ServerConnection implements Runnable {
         break;
       default:
         throw new IllegalArgumentException(
-            String.format("The ID passed is  %s  which does not correspond with any transient data",
+            String.format("The ID passed is %s which does not correspond with any transient data",
                 boolID));
     }
   }
@@ -1680,7 +1680,7 @@ public abstract class ServerConnection implements Runnable {
         break;
       default:
         throw new IllegalArgumentException(
-            String.format("The ID passed is  %s  which does not correspond with any transient data",
+            String.format("The ID passed is %s which does not correspond with any transient data",
                 boolID));
     }
     return retVal;
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tx/RemoteDestroyMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tx/RemoteDestroyMessage.java
index 27b0b54..55e6d57 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tx/RemoteDestroyMessage.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tx/RemoteDestroyMessage.java
@@ -154,7 +154,7 @@ public class RemoteDestroyMessage extends RemoteOperationMessageWithDirectReply
     this.versionTag = event.getVersionTag();
     Assert.assertTrue(this.eventId != null);
 
-    // added for old value if available sent over the wire for bridge servers.
+    // added for old value if available sent over the wire for cache servers.
     if (event.hasOldValue()) {
       this.hasOldValue = true;
       event.exportOldValue(this);
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tx/RemotePutMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tx/RemotePutMessage.java
index 3ed1fc6..61bda81 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tx/RemotePutMessage.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tx/RemotePutMessage.java
@@ -227,7 +227,7 @@ public class RemotePutMessage extends RemoteOperationMessageWithDirectReply
       assert this.deserializationPolicy == DistributedCacheOperation.DESERIALIZATION_POLICY_NONE : this.deserializationPolicy;
     }
 
-    // added for cqs on Bridge Servers. rdubey
+    // added for cqs on cache servers. rdubey
 
 
     if (event.hasOldValue()) {
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/xmlcache/CacheCreation.java b/geode-core/src/main/java/org/apache/geode/internal/cache/xmlcache/CacheCreation.java
index 7c3b573..2ee6126 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/xmlcache/CacheCreation.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/xmlcache/CacheCreation.java
@@ -204,7 +204,7 @@ public class CacheCreation implements InternalCache {
   private boolean hasServer = false;
 
   /**
-   * The bridge servers configured for this cache
+   * The cache servers configured for this cache
    */
   private final List<CacheServer> bridgeServers = new ArrayList<>();
 
@@ -739,7 +739,7 @@ public class CacheCreation implements InternalCache {
 
       } catch (IOException ex) {
         throw new GemFireIOException(
-            String.format("While starting cache server  %s", impl),
+            String.format("While starting cache server %s", impl),
             ex);
       }
     }
@@ -932,12 +932,12 @@ public class CacheCreation implements InternalCache {
         if (otherAttrs == null) {
           getLogger().fine("No attributes for " + myId);
           throw new RuntimeException(
-              String.format("No attributes for  %s", myId));
+              String.format("No attributes for %s", myId));
 
         } else {
           if (!myAttrs.sameAs(otherAttrs)) {
             getLogger().fine("Attributes for " + myId + " do not match");
-            throw new RuntimeException(String.format("Attributes for  %s  do not match",
+            throw new RuntimeException(String.format("Attributes for %s do not match",
                 myId));
           }
         }
@@ -954,7 +954,7 @@ public class CacheCreation implements InternalCache {
         Region<Object, Object> otherRegion = other.getRegion(rootRegion.getName());
         if (otherRegion == null) {
           throw new RuntimeException(
-              String.format("no root  %s", rootRegion.getName()));
+              String.format("no root %s", rootRegion.getName()));
         } else if (!rootRegion.sameAs(otherRegion)) {
           throw new RuntimeException(
               "regions differ");
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/xmlcache/CacheServerCreation.java b/geode-core/src/main/java/org/apache/geode/internal/cache/xmlcache/CacheServerCreation.java
index fdd8935..dff23e7 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/xmlcache/CacheServerCreation.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/xmlcache/CacheServerCreation.java
@@ -50,7 +50,7 @@ public class CacheServerCreation extends AbstractCacheServer {
   }
 
   /**
-   * Constructor for retaining bridge server information during auto-reconnect
+   * Constructor for retaining cache server information during auto-reconnect
    *
    */
   public CacheServerCreation(InternalCache cache, CacheServer other) {
@@ -160,7 +160,7 @@ public class CacheServerCreation extends AbstractCacheServer {
   }
 
   /**
-   * Returns whether or not this bridge server has the same configuration as another bridge server.
+   * Returns whether or not this cache server has the same configuration as another cache server.
    */
   @Override
   public boolean sameAs(CacheServer other) {
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/xmlcache/CacheXml.java b/geode-core/src/main/java/org/apache/geode/internal/cache/xmlcache/CacheXml.java
index 9474cc2..1257563 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/xmlcache/CacheXml.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/xmlcache/CacheXml.java
@@ -837,7 +837,7 @@ public abstract class CacheXml implements EntityResolver2, ErrorHandler {
       result = new InputSource(stream);
     } else {
       throw new SAXNotRecognizedException(
-          String.format("DTD not found:  %s", location));
+          String.format("DTD not found: %s", location));
     }
     return result;
   }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/xmlcache/CacheXmlGenerator.java b/geode-core/src/main/java/org/apache/geode/internal/cache/xmlcache/CacheXmlGenerator.java
index a8c2989..13922d6 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/xmlcache/CacheXmlGenerator.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/xmlcache/CacheXmlGenerator.java
@@ -1809,7 +1809,7 @@ public class CacheXmlGenerator extends CacheXml implements XMLReader {
 
       } else {
         throw new InternalGemFireException(
-            String.format("Unknown scope:  %s", scope));
+            String.format("Unknown scope: %s", scope));
       }
 
 
@@ -1897,7 +1897,7 @@ public class CacheXmlGenerator extends CacheXml implements XMLReader {
           }
         } else {
           throw new InternalGemFireException(
-              String.format("Unknown data policy:  %s", dp));
+              String.format("Unknown data policy: %s", dp));
         }
 
         if (generateDefaults() || !dp.equals(DataPolicy.DEFAULT))
@@ -1924,7 +1924,7 @@ public class CacheXmlGenerator extends CacheXml implements XMLReader {
           mirrorString = KEYS_VALUES;
         else
           throw new InternalGemFireException(
-              String.format("Unknown mirror type:  %s", mirror));
+              String.format("Unknown mirror type: %s", mirror));
         atts.addAttribute("", "", MIRROR_TYPE, "", mirrorString);
       }
       if ((!(attrs instanceof RegionAttributesCreation)
@@ -2306,7 +2306,7 @@ public class CacheXmlGenerator extends CacheXml implements XMLReader {
 
     } else {
       throw new InternalGemFireException(
-          String.format("Unknown ExpirationAction:  %s", action));
+          String.format("Unknown ExpirationAction: %s", action));
     }
 
     atts.addAttribute("", "", ACTION, "", actionString);
@@ -2341,7 +2341,7 @@ public class CacheXmlGenerator extends CacheXml implements XMLReader {
       interestString = CACHE_CONTENT;
     } else {
       throw new InternalGemFireException(
-          String.format("Unknown InterestPolicy:  %s", ip));
+          String.format("Unknown InterestPolicy: %s", ip));
     }
 
     atts.addAttribute("", "", INTEREST_POLICY, "", interestString);
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/xmlcache/CacheXmlParser.java b/geode-core/src/main/java/org/apache/geode/internal/cache/xmlcache/CacheXmlParser.java
index 306c35f..cdd246e 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/xmlcache/CacheXmlParser.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/xmlcache/CacheXmlParser.java
@@ -951,7 +951,7 @@ public class CacheXmlParser extends CacheXml implements ContentHandler {
     Declarable d = createDeclarable();
     if (!(d instanceof TransactionListener)) {
       throw new CacheXmlException(
-          String.format("A  %s  is not an instance of a CacheListener.",
+          String.format("A %s is not an instance of a CacheListener.",
               d.getClass().getName()));
     }
     CacheTransactionManagerCreation txMgrCreation = (CacheTransactionManagerCreation) stack.peek();
@@ -1044,7 +1044,7 @@ public class CacheXmlParser extends CacheXml implements ContentHandler {
     Declarable d = createDeclarable();
     if (!(d instanceof TransactionWriter)) {
       throw new CacheXmlException(
-          String.format("A  %s  is not an instance of a TransactionWriter.",
+          String.format("A %s is not an instance of a TransactionWriter.",
               d.getClass().getName()));
     }
     CacheTransactionManagerCreation txMgrCreation = (CacheTransactionManagerCreation) stack.peek();
@@ -1069,7 +1069,7 @@ public class CacheXmlParser extends CacheXml implements ContentHandler {
       attrs.setScope(Scope.GLOBAL);
     } else {
       throw new InternalGemFireException(
-          String.format("Unknown scope:  %s", scope));
+          String.format("Unknown scope: %s", scope));
     }
     String mirror = atts.getValue(MIRROR_TYPE);
     if (mirror == null) {
@@ -1081,7 +1081,7 @@ public class CacheXmlParser extends CacheXml implements ContentHandler {
       attrs.setMirrorType(MirrorType.KEYS_VALUES);
     } else {
       throw new InternalGemFireException(
-          String.format("Unknown mirror type:  %s", mirror));
+          String.format("Unknown mirror type: %s", mirror));
     }
     {
       String dp = atts.getValue(DATA_POLICY);
@@ -1102,7 +1102,7 @@ public class CacheXmlParser extends CacheXml implements ContentHandler {
         attrs.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
       } else {
         throw new InternalGemFireException(
-            String.format("Unknown data policy:  %s", dp));
+            String.format("Unknown data policy: %s", dp));
       }
     }
 
@@ -1337,7 +1337,7 @@ public class CacheXmlParser extends CacheXml implements ContentHandler {
       c = InternalDataSerializer.getCachedClass(className);
     } catch (Exception ex) {
       throw new CacheXmlException(
-          String.format("Could not load key-constraint class:  %s",
+          String.format("Could not load key-constraint class: %s",
               className),
           ex);
     }
@@ -1359,7 +1359,7 @@ public class CacheXmlParser extends CacheXml implements ContentHandler {
       c = InternalDataSerializer.getCachedClass(className);
     } catch (Exception ex) {
       throw new CacheXmlException(
-          String.format("Could not load value-constraint class:  %s",
+          String.format("Could not load value-constraint class: %s",
               className),
           ex);
     }
@@ -1394,7 +1394,7 @@ public class CacheXmlParser extends CacheXml implements ContentHandler {
     Object a = stack.peek();
     if (!(a instanceof RegionAttributesCreation)) {
       throw new CacheXmlException(
-          String.format("A  %s  must be defined in the context of region-attributes.",
+          String.format("A %s must be defined in the context of region-attributes.",
               dependentElement));
     }
     return (RegionAttributesCreation) a;
@@ -1435,7 +1435,7 @@ public class CacheXmlParser extends CacheXml implements ContentHandler {
     } else {
       throw new CacheXmlException(
           String.format(
-              "A  %s  must be defined in the context of region-attributes or partition-attributes.",
+              "A %s must be defined in the context of region-attributes or partition-attributes.",
               ENTRY_TIME_TO_LIVE));
     }
   }
@@ -1463,7 +1463,7 @@ public class CacheXmlParser extends CacheXml implements ContentHandler {
     } else {
       throw new CacheXmlException(
           String.format(
-              "A  %s  must be defined in the context of region-attributes or partition-attributes.",
+              "A %s must be defined in the context of region-attributes or partition-attributes.",
               ENTRY_IDLE_TIME));
     }
   }
@@ -1587,7 +1587,7 @@ public class CacheXmlParser extends CacheXml implements ContentHandler {
       attrs.setDiskDirsAndSize(disks, diskSizes);
     } else {
       throw new CacheXmlException(
-          String.format("A  %s  must be defined in the context of region-attributes.",
+          String.format("A %s must be defined in the context of region-attributes.",
               DISK_DIRS));
     }
   }
@@ -1721,7 +1721,7 @@ public class CacheXmlParser extends CacheXml implements ContentHandler {
       sa = new SubscriptionAttributes(InterestPolicy.CACHE_CONTENT);
     } else {
       throw new InternalGemFireException(
-          String.format("Unknown interest-policy:  %s", ip));
+          String.format("Unknown interest-policy: %s", ip));
     }
     RegionAttributesCreation rattrs = (RegionAttributesCreation) stack.peek();
     rattrs.setSubscriptionAttributes(sa);
@@ -1892,7 +1892,7 @@ public class CacheXmlParser extends CacheXml implements ContentHandler {
       expire = new ExpirationAttributes(timeout, ExpirationAction.LOCAL_DESTROY);
     } else {
       throw new InternalGemFireException(
-          String.format("Unknown expiration action:  %s", action));
+          String.format("Unknown expiration action: %s", action));
     }
     stack.push(expire);
   }
@@ -1981,7 +1981,7 @@ public class CacheXmlParser extends CacheXml implements ContentHandler {
     Class<?> klass = getClassFromStack();
     if (!Compressor.class.isAssignableFrom(klass)) {
       throw new CacheXmlException(
-          String.format("A  %s  is not an instance of a Compressor.",
+          String.format("A %s is not an instance of a Compressor.",
               klass.getName()));
     }
 
@@ -1989,7 +1989,7 @@ public class CacheXmlParser extends CacheXml implements ContentHandler {
     try {
       compressor = (Compressor) klass.newInstance();
     } catch (Exception ex) {
-      throw new CacheXmlException(String.format("While instantiating a  %s",
+      throw new CacheXmlException(String.format("While instantiating a %s",
           klass.getName()), ex);
     }
 
@@ -2000,7 +2000,7 @@ public class CacheXmlParser extends CacheXml implements ContentHandler {
       attrs.setCompressor(compressor);
     } else {
       throw new CacheXmlException(
-          String.format("A  %s  must be defined in the context of region-attributes or  %s",
+          String.format("A %s must be defined in the context of region-attributes or %s",
               new Object[] {COMPRESSOR, DYNAMIC_REGION_FACTORY}));
     }
   }
@@ -2017,7 +2017,7 @@ public class CacheXmlParser extends CacheXml implements ContentHandler {
     Declarable d = createDeclarable();
     if (!(d instanceof CacheLoader)) {
       throw new CacheXmlException(
-          String.format("A  %s  is not an instance of a CacheLoader.",
+          String.format("A %s is not an instance of a CacheLoader.",
               d.getClass().getName()));
     }
     // Two peeks required to handle dynamic region context
@@ -2040,7 +2040,7 @@ public class CacheXmlParser extends CacheXml implements ContentHandler {
       attrs.setCacheLoader((CacheLoader) d);
     } else {
       throw new CacheXmlException(
-          String.format("A  %s  must be defined in the context of region-attributes or  %s",
+          String.format("A %s must be defined in the context of region-attributes or %s",
               new Object[] {CACHE_LOADER, DYNAMIC_REGION_FACTORY}));
     }
   }
@@ -2057,7 +2057,7 @@ public class CacheXmlParser extends CacheXml implements ContentHandler {
     Declarable d = createDeclarable();
     if (!(d instanceof CacheWriter)) {
       throw new CacheXmlException(
-          String.format("A  %s  is not an instance of a CacheWriter.",
+          String.format("A %s is not an instance of a CacheWriter.",
               d.getClass().getName()));
     }
 
@@ -2076,7 +2076,7 @@ public class CacheXmlParser extends CacheXml implements ContentHandler {
       //
       if (!(a instanceof RegionAttributesCreation)) {
         throw new CacheXmlException(
-            String.format("%s  must be defined in the context of  %s",
+            String.format("%s must be defined in the context of %s",
                 new Object[] {CACHE_WRITER, DYNAMIC_REGION_FACTORY}));
       }
       stack.push(size);
@@ -2085,7 +2085,7 @@ public class CacheXmlParser extends CacheXml implements ContentHandler {
     // check for normal region-attributes
     else if (!(a instanceof RegionAttributesCreation)) {
       throw new CacheXmlException(
-          String.format("%s  must be defined in the context of region-attributes.",
+          String.format("%s must be defined in the context of region-attributes.",
               CACHE_WRITER));
     }
 
@@ -2159,7 +2159,7 @@ public class CacheXmlParser extends CacheXml implements ContentHandler {
       d = createDeclarable();
       if (!(d instanceof ObjectSizer)) {
         throw new CacheXmlException(
-            String.format("A  %s  is not an instance of a ObjectSizer.",
+            String.format("A %s is not an instance of a ObjectSizer.",
                 d.getClass().getName()));
       }
     }
@@ -2218,7 +2218,7 @@ public class CacheXmlParser extends CacheXml implements ContentHandler {
     Declarable d = createDeclarable();
     if (!(d instanceof CacheListener)) {
       throw new CacheXmlException(
-          String.format("A  %s  is not an instance of a CacheListener.",
+          String.format("A %s is not an instance of a CacheListener.",
               d.getClass().getName()));
     }
     RegionAttributesCreation attrs = peekRegionAttributesContext(CACHE_LISTENER);
@@ -2326,7 +2326,7 @@ public class CacheXmlParser extends CacheXml implements ContentHandler {
     Declarable d = createDeclarable();
     if (!(d instanceof AsyncEventListener)) {
       throw new CacheXmlException(
-          String.format("A  %s  is not an instance of a AsyncEventListener",
+          String.format("A %s is not an instance of a AsyncEventListener",
               d.getClass().getName()));
     }
     AsyncEventQueueCreation eventChannel = peekAsyncEventQueueContext(ASYNC_EVENT_LISTENER);
@@ -2337,7 +2337,7 @@ public class CacheXmlParser extends CacheXml implements ContentHandler {
     Object a = stack.peek();
     if (!(a instanceof AsyncEventQueueCreation)) {
       throw new CacheXmlException(
-          String.format("A  %s  must be defined in the context of async-event-queue.",
+          String.format("A %s must be defined in the context of async-event-queue.",
               dependentElement));
     }
     return (AsyncEventQueueCreation) a;
@@ -3149,7 +3149,7 @@ public class CacheXmlParser extends CacheXml implements ContentHandler {
     } else {
       throw new CacheXmlException(
           String.format(
-              "A  %s  must be defined in the context of gateway-sender or gateway-receiver.",
+              "A %s must be defined in the context of gateway-sender or gateway-receiver.",
               GATEWAY_TRANSPORT_FILTER));
     }
   }
@@ -3170,7 +3170,7 @@ public class CacheXmlParser extends CacheXml implements ContentHandler {
     } else {
       throw new CacheXmlException(
           String.format(
-              "A  %s  must be defined in the context of gateway-sender or async-event-queue.",
+              "A %s must be defined in the context of gateway-sender or async-event-queue.",
               "GatewayEventFilter"));
     }
   }
@@ -3192,7 +3192,7 @@ public class CacheXmlParser extends CacheXml implements ContentHandler {
     } else {
       throw new CacheXmlException(
           String.format(
-              "A  %s  must be defined in the context of gateway-sender or async-event-queue.",
+              "A %s must be defined in the context of gateway-sender or async-event-queue.",
               "GatewayEventSubstitutionFilter"));
     }
   }
@@ -3201,7 +3201,7 @@ public class CacheXmlParser extends CacheXml implements ContentHandler {
     Object a = stack.peek();
     if (!(a instanceof GatewaySenderFactory)) {
       throw new CacheXmlException(
-          String.format("A  %s  must be defined in the context of gateway-sender.",
+          String.format("A %s must be defined in the context of gateway-sender.",
               dependentElement));
     }
     return (GatewaySenderFactory) a;
@@ -3211,7 +3211,7 @@ public class CacheXmlParser extends CacheXml implements ContentHandler {
     Declarable d = createDeclarable();
     if (!(d instanceof PdxSerializer)) {
       throw new CacheXmlException(
-          String.format("A  %s  is not an instance of a PdxSerializer.",
+          String.format("A %s is not an instance of a PdxSerializer.",
               d.getClass().getName()));
     }
     PdxSerializer serializer = (PdxSerializer) d;
@@ -3238,7 +3238,7 @@ public class CacheXmlParser extends CacheXml implements ContentHandler {
       o = c.newInstance();
     } catch (Exception ex) {
       throw new CacheXmlException(
-          String.format("While instantiating a  %s", className), ex);
+          String.format("While instantiating a %s", className), ex);
     }
     if (!(o instanceof Declarable)) {
       throw new CacheXmlException(
@@ -3269,7 +3269,7 @@ public class CacheXmlParser extends CacheXml implements ContentHandler {
     while (!top.equals(globalOrLocal)) {
       if (!(top instanceof Parameter)) {
         throw new CacheXmlException(
-            String.format("Only a parameter is allowed in the context of  %s",
+            String.format("Only a parameter is allowed in the context of %s",
                 globalOrLocal));
       }
       Parameter param = (Parameter) top;
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/xmlcache/DiskStoreAttributesCreation.java b/geode-core/src/main/java/org/apache/geode/internal/cache/xmlcache/DiskStoreAttributesCreation.java
index 27c0e2d..54af1cf 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/xmlcache/DiskStoreAttributesCreation.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/xmlcache/DiskStoreAttributesCreation.java
@@ -174,43 +174,43 @@ public class DiskStoreAttributesCreation extends UserSpecifiedDiskStoreAttribute
   public boolean sameAs(DiskStore other) {
     if (this.autoCompact != other.getAutoCompact()) {
       throw new RuntimeException(
-          String.format("AutoCompact of disk store %s is not the same: this:  %s  other:  %s",
+          String.format("AutoCompact of disk store %s is not the same: this: %s other: %s",
               new Object[] {name, this.autoCompact, other.getAutoCompact()}));
     }
     if (this.compactionThreshold != other.getCompactionThreshold()) {
       throw new RuntimeException(
           String.format(
-              "CompactionThreshold of disk store %s is not the same: this:  %s  other:  %s",
+              "CompactionThreshold of disk store %s is not the same: this: %s other: %s",
 
               new Object[] {name, this.compactionThreshold, other.getCompactionThreshold()}));
     }
     if (this.allowForceCompaction != other.getAllowForceCompaction()) {
       throw new RuntimeException(
           String.format(
-              "AllowForceCompaction of disk store %s is not the same: this:  %s  other:  %s",
+              "AllowForceCompaction of disk store %s is not the same: this: %s other: %s",
 
               new Object[] {name, this.allowForceCompaction, other.getAllowForceCompaction()}));
     }
     if (this.maxOplogSizeInBytes != other.getMaxOplogSize() * 1024 * 1024) {
       throw new RuntimeException(
-          String.format("MaxOpLogSize of disk store %s is not the same: this:  %s  other:  %s",
+          String.format("MaxOpLogSize of disk store %s is not the same: this: %s other: %s",
               new Object[] {name, this.maxOplogSizeInBytes / 1024 / 1024,
                   other.getMaxOplogSize()}));
     }
     if (this.timeInterval != other.getTimeInterval()) {
       throw new RuntimeException(
-          String.format("TimeInterval of disk store %s is not the same: this:  %s  other:  %s",
+          String.format("TimeInterval of disk store %s is not the same: this: %s other: %s",
               new Object[] {name, this.timeInterval, other.getTimeInterval()}));
     }
     if (this.writeBufferSize != other.getWriteBufferSize()) {
       throw new RuntimeException(
-          String.format("WriteBufferSize of disk store %s is not the same: this:  %s  other:  %s",
+          String.format("WriteBufferSize of disk store %s is not the same: this: %s other: %s",
 
               new Object[] {name, this.writeBufferSize, other.getWriteBufferSize()}));
     }
     if (this.queueSize != other.getQueueSize()) {
       throw new RuntimeException(
-          String.format("QueueSize of disk store %s is not the same: this:  %s  other:  %s",
+          String.format("QueueSize of disk store %s is not the same: this: %s other: %s",
               new Object[] {name, this.queueSize, other.getQueueSize()}));
     }
     if (!equal(this.diskDirs, other.getDiskDirs())) {
@@ -291,7 +291,7 @@ public class DiskStoreAttributesCreation extends UserSpecifiedDiskStoreAttribute
     if (sizes.length != this.diskDirs.length) {
       throw new IllegalArgumentException(
           String.format(
-              "Number of diskSizes is  %s  which is not equal to number of disk Dirs which is  %s",
+              "Number of diskSizes is %s which is not equal to number of disk Dirs which is %s",
 
               new Object[] {Integer.valueOf(sizes.length), Integer.valueOf(diskDirs.length)}));
     }
@@ -334,7 +334,7 @@ public class DiskStoreAttributesCreation extends UserSpecifiedDiskStoreAttribute
     for (int i = 0; i < sizes.length; i++) {
       if (sizes[i] < 0) {
         throw new IllegalArgumentException(
-            String.format("Dir size cannot be negative :  %s for disk store %s",
+            String.format("Dir size cannot be negative : %s for disk store %s",
                 new Object[] {Integer.valueOf(sizes[i]), name}));
       }
     }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/xmlcache/RegionAttributesCreation.java b/geode-core/src/main/java/org/apache/geode/internal/cache/xmlcache/RegionAttributesCreation.java
index e21a397..3eec93e 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/xmlcache/RegionAttributesCreation.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/xmlcache/RegionAttributesCreation.java
@@ -443,7 +443,7 @@ public class RegionAttributesCreation extends UserSpecifiedRegionAttributes
     }
     if (this.dataPolicy != other.getDataPolicy()) {
       throw new RuntimeException(
-          String.format("Data Policies are not the same: this:  %s  other:  %s",
+          String.format("Data Policies are not the same: this: %s other: %s",
               new Object[] {this.getDataPolicy(), other.getDataPolicy()}));
     }
     if (this.earlyAck != other.getEarlyAck()) {
@@ -521,7 +521,7 @@ public class RegionAttributesCreation extends UserSpecifiedRegionAttributes
     }
     if (this.cloningEnabled != other.getCloningEnabled()) {
       throw new RuntimeException(
-          String.format("Cloning enabled is not the same: this:  %s  other:  %s",
+          String.format("Cloning enabled is not the same: this: %s other: %s",
               new Object[] {Boolean.valueOf(this.cloningEnabled),
                   Boolean.valueOf(other.getCloningEnabled())}));
     }
@@ -1017,7 +1017,7 @@ public class RegionAttributesCreation extends UserSpecifiedRegionAttributes
     for (int i = 0; i < sizes.length; i++) {
       if (sizes[i] < 0) {
         throw new IllegalArgumentException(
-            String.format("Dir size cannot be negative :  %s",
+            String.format("Dir size cannot be negative : %s",
                 Integer.valueOf(sizes[i])));
       }
     }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/xmlcache/RegionCreation.java b/geode-core/src/main/java/org/apache/geode/internal/cache/xmlcache/RegionCreation.java
index c99302f..0dd9a11 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/xmlcache/RegionCreation.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/xmlcache/RegionCreation.java
@@ -307,13 +307,13 @@ public class RegionCreation implements Region, Extensible<Region<?, ?>> {
     }
 
     if (!this.getName().equals(other.getName())) {
-      throw new RuntimeException(String.format("region names differ: this:  %s  other:  %s",
+      throw new RuntimeException(String.format("region names differ: this: %s other: %s",
           new Object[] {this.getName(), other.getName()}));
     }
 
     if (!this.attrs.sameAs(other.getAttributes())) {
       throw new RuntimeException(
-          String.format("region attributes differ this:  %s  other:  %s",
+          String.format("region attributes differ this: %s other: %s",
               new Object[] {this.attrs, other.getAttributes()}));
     }
 
diff --git a/geode-core/src/main/java/org/apache/geode/internal/datasource/FacetsJCAConnectionManagerImpl.java b/geode-core/src/main/java/org/apache/geode/internal/datasource/FacetsJCAConnectionManagerImpl.java
index c3b5a29..b3538db 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/datasource/FacetsJCAConnectionManagerImpl.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/datasource/FacetsJCAConnectionManagerImpl.java
@@ -105,7 +105,7 @@ public class FacetsJCAConnectionManagerImpl
       ex.printStackTrace();
       throw new ResourceException(
           String.format(
-              "FacetsJCAConnectionManagerImpl:: allocateConnection : in getting connection from pool due to  %s",
+              "FacetsJCAConnectionManagerImpl:: allocateConnection : in getting connection from pool due to %s",
               ex.getMessage()),
           ex);
     }
@@ -147,7 +147,7 @@ public class FacetsJCAConnectionManagerImpl
     } catch (SystemException ex) {
       throw new ResourceException(
           String.format(
-              "FacetsJCAConnectionManagerImpl:: allocateConnection :system exception due to  %s",
+              "FacetsJCAConnectionManagerImpl:: allocateConnection :system exception due to %s",
               ex.getMessage()),
           ex);
     }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/datasource/JCAConnectionManagerImpl.java b/geode-core/src/main/java/org/apache/geode/internal/datasource/JCAConnectionManagerImpl.java
index 9df66b1..830fcf1 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/datasource/JCAConnectionManagerImpl.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/datasource/JCAConnectionManagerImpl.java
@@ -94,7 +94,7 @@ public class JCAConnectionManagerImpl implements ConnectionManager, ConnectionEv
       // ex.printStackTrace();
       throw new ResourceException(
           String.format(
-              "JCAConnectionManagerImpl:: allocateConnection : in getting connection from pool due to  %s",
+              "JCAConnectionManagerImpl:: allocateConnection : in getting connection from pool due to %s",
               ex.getMessage()),
           ex);
     }
@@ -120,13 +120,13 @@ public class JCAConnectionManagerImpl implements ConnectionManager, ConnectionEv
       }
     } catch (RollbackException ex) {
       throw new ResourceException(
-          String.format("JCAConnectionManagerImpl:: allocateConnection : in transaction due to  %s",
+          String.format("JCAConnectionManagerImpl:: allocateConnection : in transaction due to %s",
               ex.getMessage()),
           ex);
     } catch (SystemException ex) {
       throw new ResourceException(
           String.format(
-              "JCAConnectionManagerImpl:: allocateConnection :system exception due to  %s",
+              "JCAConnectionManagerImpl:: allocateConnection :system exception due to %s",
               ex.getMessage()),
           ex);
     }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/jndi/ContextImpl.java b/geode-core/src/main/java/org/apache/geode/internal/jndi/ContextImpl.java
index e24009d..fb80157 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/jndi/ContextImpl.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/jndi/ContextImpl.java
@@ -187,7 +187,7 @@ public class ContextImpl implements Context {
         return subContext;
       } else {
         throw new NameAlreadyBoundException(
-            String.format("Name  %s  is already bound!", subContextName));
+            String.format("Name %s is already bound!", subContextName));
       }
     } else {
       if (boundObject instanceof Context) {
@@ -198,7 +198,7 @@ public class ContextImpl implements Context {
         // an exception will be thrown in that case.
         return ((Context) boundObject).createSubcontext(parsedName.getSuffix(1));
       } else {
-        throw new NotContextException(String.format("Expected Context but found  %s",
+        throw new NotContextException(String.format("Expected Context but found %s",
             boundObject));
       }
     }
@@ -240,7 +240,7 @@ public class ContextImpl implements Context {
     String subContextName = parsedName.get(0);
     Object boundObject = ctxMaps.get(subContextName);
     if (boundObject == null) {
-      throw new NameNotFoundException(String.format("Name  %s not found in the context!",
+      throw new NameNotFoundException(String.format("Name %s not found in the context!",
           subContextName));
     }
     if (!(boundObject instanceof ContextImpl)) {
@@ -382,9 +382,9 @@ public class ContextImpl implements Context {
       }
       if (subContext == null && !ctxMaps.containsKey(parsedName.get(0))) {
         throw new NameNotFoundException(
-            String.format("Name  %s  not found", name));
+            String.format("Name %s not found", name));
       } else {
-        throw new NotContextException(String.format("Expected Context but found  %s",
+        throw new NotContextException(String.format("Expected Context but found %s",
             subContext));
       }
     }
@@ -433,7 +433,7 @@ public class ContextImpl implements Context {
       // if not found
       if (!ctxMaps.containsKey(nameComponent)) {
         throw new NameNotFoundException(
-            String.format("Name  %s  not found", name));
+            String.format("Name %s not found", name));
       }
       // if this is a compound name
       else if (parsedName.size() > 1) {
@@ -441,7 +441,7 @@ public class ContextImpl implements Context {
           res = ((ContextImpl) res).lookup(parsedName.getSuffix(1));
         } else {
           throw new NotContextException(
-              String.format("Expected ContextImpl but found  %s", res));
+              String.format("Expected ContextImpl but found %s", res));
         }
       }
       return res;
@@ -451,7 +451,7 @@ public class ContextImpl implements Context {
         writer.info(String.format("ContextImpl::lookup::Error while looking up %s", name),
             e);
       throw new NameNotFoundException(
-          String.format("Name  %s  not found", name));
+          String.format("Name %s not found", name));
     } catch (SystemException se) {
       LogWriter writer = TransactionUtils.getLogWriter();
       if (writer.severeEnabled())
@@ -482,7 +482,7 @@ public class ContextImpl implements Context {
         writer.info(String.format("ContextImpl::lookup::Error while looking up %s", name),
             e);
       throw new NameNotFoundException(
-          String.format("Name  %s  not found", new Object[] {name}));
+          String.format("Name %s not found", new Object[] {name}));
     }
   }
 
@@ -540,7 +540,7 @@ public class ContextImpl implements Context {
           Context sub = createSubcontext(nameToBind);
           sub.bind(parsedName.getSuffix(1), obj);
         } else {
-          throw new NotContextException(String.format("Expected Context but found  %s",
+          throw new NotContextException(String.format("Expected Context but found %s",
               boundObject));
         }
       }
@@ -627,9 +627,9 @@ public class ContextImpl implements Context {
         // if the name is not found then throw exception
         if (!ctxMaps.containsKey(nameToRemove)) {
           throw new NameNotFoundException(
-              String.format("Can not find  %s", name));
+              String.format("Can not find %s", name));
         }
-        throw new NotContextException(String.format("Expected Context but found  %s",
+        throw new NotContextException(String.format("Expected Context but found %s",
             boundObject));
       }
     }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/net/SocketCreator.java b/geode-core/src/main/java/org/apache/geode/internal/net/SocketCreator.java
index e3abfa0..83ffd95 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/net/SocketCreator.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/net/SocketCreator.java
@@ -695,7 +695,7 @@ public class SocketCreator {
         result.bind(new InetSocketAddress(bindAddr, nport), backlog);
       } catch (BindException e) {
         BindException throwMe =
-            new BindException(String.format("Failed to create server socket on  %s[%s]",
+            new BindException(String.format("Failed to create server socket on %s[%s]",
                 bindAddr, Integer.valueOf(nport)));
         throwMe.initCause(e);
         throw throwMe;
@@ -752,7 +752,7 @@ public class SocketCreator {
         result.bind(new InetSocketAddress(bindAddr, nport), backlog);
       } catch (BindException e) {
         BindException throwMe =
-            new BindException(String.format("Failed to create server socket on  %s[%s]",
+            new BindException(String.format("Failed to create server socket on %s[%s]",
                 bindAddr == null ? InetAddress.getLocalHost() : bindAddr,
                 String.valueOf(nport)));
         throwMe.initCause(e);
diff --git a/geode-core/src/main/java/org/apache/geode/internal/statistics/ArchiveSplitter.java b/geode-core/src/main/java/org/apache/geode/internal/statistics/ArchiveSplitter.java
index 0963712..21b8bd3 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/statistics/ArchiveSplitter.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/statistics/ArchiveSplitter.java
@@ -90,13 +90,13 @@ public class ArchiveSplitter implements StatArchiveFormat {
     byte archiveVersion = dataIn.readByte();
     if (archiveVersion <= 1) {
       throw new GemFireIOException(
-          String.format("Archive version:  %s  is no longer supported.",
+          String.format("Archive version: %s is no longer supported.",
               new Byte(archiveVersion)),
           null);
     }
     if (archiveVersion > ARCHIVE_VERSION) {
       throw new GemFireIOException(
-          String.format("Unsupported archive version:  %s .  The supported version is:  %s .",
+          String.format("Unsupported archive version: %s .  The supported version is: %s .",
 
               new Object[] {new Byte(archiveVersion), new Byte(ARCHIVE_VERSION)}),
           null);
@@ -241,7 +241,7 @@ public class ArchiveSplitter implements StatArchiveFormat {
             instBits[i] = readCompactValue();
             break;
           default:
-            throw new IOException(String.format("unexpected typeCode value  %s",
+            throw new IOException(String.format("unexpected typeCode value %s",
                 new Byte(instTypeCodes[i])));
         }
       }
@@ -324,7 +324,7 @@ public class ArchiveSplitter implements StatArchiveFormat {
             statDeltaBits = readCompactValue();
             break;
           default:
-            throw new IOException(String.format("unexpected typeCode value  %s",
+            throw new IOException(String.format("unexpected typeCode value %s",
                 new Byte(typeCodes[statOffset])));
         }
         bits[statOffset] += statDeltaBits;
@@ -364,7 +364,7 @@ public class ArchiveSplitter implements StatArchiveFormat {
           this.myIs.putBytes(this.dataOut);
           break;
         default:
-          throw new IOException(String.format("Unexpected token byte value:  %s",
+          throw new IOException(String.format("Unexpected token byte value: %s",
               new Byte(token)));
       }
       return true;
diff --git a/geode-core/src/main/java/org/apache/geode/internal/statistics/GemFireStatSampler.java b/geode-core/src/main/java/org/apache/geode/internal/statistics/GemFireStatSampler.java
index 80d16f0..76da84c 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/statistics/GemFireStatSampler.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/statistics/GemFireStatSampler.java
@@ -457,7 +457,7 @@ public class GemFireStatSampler extends HostStatSampler {
           break;
         default:
           throw new RuntimeException(
-              String.format("Illegal field type  %s  for statistic",
+              String.format("Illegal field type %s for statistic",
                   stats.getType()));
       }
       result.stats = stats;
diff --git a/geode-core/src/main/java/org/apache/geode/internal/statistics/HostStatHelper.java b/geode-core/src/main/java/org/apache/geode/internal/statistics/HostStatHelper.java
index 4cc0a7f..a9fd1d1 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/statistics/HostStatHelper.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/statistics/HostStatHelper.java
@@ -174,7 +174,7 @@ public class HostStatHelper {
     } else if ((flags & SYSTEM_STAT_FLAG) != 0) {
       HostStatHelper.refreshSystem(stats);
     } else {
-      throw new RuntimeException(String.format("Unexpected os stats flags  %s",
+      throw new RuntimeException(String.format("Unexpected os stats flags %s",
           Integer.valueOf(flags)));
     }
   }
@@ -200,7 +200,7 @@ public class HostStatHelper {
         break;
       default:
         throw new InternalGemFireException(
-            String.format("unhandled osCode= %s  HostStatHelper:newProcess",
+            String.format("unhandled osCode= %s HostStatHelper:newProcess",
                 Integer.valueOf(osCode)));
     }
     // Note we don't call refreshProcess since we only want the manager to do that
@@ -229,7 +229,7 @@ public class HostStatHelper {
 
       default:
         throw new InternalGemFireException(
-            String.format("unhandled osCode= %s  HostStatHelper:newProcessStats",
+            String.format("unhandled osCode= %s HostStatHelper:newProcessStats",
                 Integer.valueOf(osCode)));
     }
   }
@@ -259,7 +259,7 @@ public class HostStatHelper {
         break;
       default:
         throw new InternalGemFireException(
-            String.format("unhandled osCode= %s  HostStatHelper:newSystem",
+            String.format("unhandled osCode= %s HostStatHelper:newSystem",
                 Integer.valueOf(osCode)));
     }
     if (stats instanceof LocalStatisticsImpl) {
diff --git a/geode-core/src/main/java/org/apache/geode/internal/statistics/StatArchiveReader.java b/geode-core/src/main/java/org/apache/geode/internal/statistics/StatArchiveReader.java
index 10aeeef..462a359 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/statistics/StatArchiveReader.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/statistics/StatArchiveReader.java
@@ -210,7 +210,7 @@ public class StatArchiveReader implements StatArchiveFormat, AutoCloseable {
       case DOUBLE_CODE:
         return Double.longBitsToDouble(bits);
       default:
-        throw new InternalGemFireException(String.format("Unexpected typecode  %s",
+        throw new InternalGemFireException(String.format("Unexpected typecode %s",
             Integer.valueOf(type)));
     }
   }
@@ -1884,7 +1884,7 @@ public class StatArchiveReader implements StatArchiveFormat, AutoCloseable {
       // assert
       if (idx != resultSize) {
         throw new InternalGemFireException(
-            String.format("getValuesEx did not fill the last  %s  entries of its result.",
+            String.format("getValuesEx did not fill the last %s entries of its result.",
                 Integer.valueOf(resultSize - idx)));
       }
       return result;
@@ -2948,13 +2948,13 @@ public class StatArchiveReader implements StatArchiveFormat, AutoCloseable {
       String machine = dataIn.readUTF();
       if (archiveVersion <= 1) {
         throw new GemFireIOException(
-            String.format("Archive version:  %s  is no longer supported.",
+            String.format("Archive version: %s is no longer supported.",
                 Byte.valueOf(archiveVersion)),
             null);
       }
       if (archiveVersion > ARCHIVE_VERSION) {
         throw new GemFireIOException(
-            String.format("Unsupported archive version:  %s .  The supported version is:  %s .",
+            String.format("Unsupported archive version: %s .  The supported version is: %s .",
 
                 new Object[] {Byte.valueOf(archiveVersion), Byte.valueOf(ARCHIVE_VERSION)}),
             null);
@@ -3146,7 +3146,7 @@ public class StatArchiveReader implements StatArchiveFormat, AutoCloseable {
               v = readCompactValue();
               break;
             default:
-              throw new IOException(String.format("unexpected typeCode value  %s",
+              throw new IOException(String.format("unexpected typeCode value %s",
                   Byte.valueOf(stats[i].getTypeCode())));
           }
           resourceInstTable[resourceInstId].initialValue(i, v);
@@ -3223,7 +3223,7 @@ public class StatArchiveReader implements StatArchiveFormat, AutoCloseable {
               statDeltaBits = readCompactValue();
               break;
             default:
-              throw new IOException(String.format("unexpected typeCode value  %s",
+              throw new IOException(String.format("unexpected typeCode value %s",
                   Byte.valueOf(stats[statOffset].getTypeCode())));
           }
           if (resourceInstTable[resourceInstId].addValueSample(statOffset, statDeltaBits)) {
@@ -3277,7 +3277,7 @@ public class StatArchiveReader implements StatArchiveFormat, AutoCloseable {
             readSampleToken();
             break;
           default:
-            throw new IOException(String.format("Unexpected token byte value:  %s",
+            throw new IOException(String.format("Unexpected token byte value: %s",
                 Byte.valueOf(token)));
         }
         return true;
diff --git a/geode-core/src/main/java/org/apache/geode/internal/statistics/StatArchiveWriter.java b/geode-core/src/main/java/org/apache/geode/internal/statistics/StatArchiveWriter.java
index 007ce32..2d85d65 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/statistics/StatArchiveWriter.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/statistics/StatArchiveWriter.java
@@ -428,7 +428,7 @@ public class StatArchiveWriter implements StatArchiveFormat, SampleHandler {
     if (delta > MAX_SHORT_TIMESTAMP) {
       if (delta > Integer.MAX_VALUE) {
         throw new InternalGemFireException(
-            String.format("timeStamp delta  %s  was greater than  %s",
+            String.format("timeStamp delta %s was greater than %s",
 
                 new Object[] {Long.valueOf(delta), Integer.valueOf(Integer.MAX_VALUE)}));
       }
@@ -685,7 +685,7 @@ public class StatArchiveWriter implements StatArchiveFormat, SampleHandler {
       }
       if (idx <= 2) {
         throw new InternalGemFireException(
-            String.format("Expected idx to be greater than 2. It was  %s  for the value  %s",
+            String.format("Expected idx to be greater than 2. It was %s for the value %s",
 
                 new Object[] {Integer.valueOf(idx), Long.valueOf(originalValue)}));
       }
@@ -747,7 +747,7 @@ public class StatArchiveWriter implements StatArchiveFormat, SampleHandler {
         writeCompactValue(v, dataOut);
         break;
       default:
-        throw new InternalGemFireException(String.format("Unexpected type code  %s",
+        throw new InternalGemFireException(String.format("Unexpected type code %s",
             Byte.valueOf(typeCode)));
     }
   }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/statistics/StatisticDescriptorImpl.java b/geode-core/src/main/java/org/apache/geode/internal/statistics/StatisticDescriptorImpl.java
index b3d6967..06d5db7 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/statistics/StatisticDescriptorImpl.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/statistics/StatisticDescriptorImpl.java
@@ -99,7 +99,7 @@ public class StatisticDescriptorImpl implements StatisticDescriptor {
         return "double";
       default:
         throw new IllegalArgumentException(
-            String.format("Unknown type code:  %s",
+            String.format("Unknown type code: %s",
                 Integer.valueOf(code)));
     }
   }
@@ -125,7 +125,7 @@ public class StatisticDescriptorImpl implements StatisticDescriptor {
         return 64;
       default:
         throw new IllegalArgumentException(
-            String.format("Unknown type code:  %s",
+            String.format("Unknown type code: %s",
                 Integer.valueOf(code)));
     }
   }
@@ -151,7 +151,7 @@ public class StatisticDescriptorImpl implements StatisticDescriptor {
         return double.class;
       default:
         throw new IllegalArgumentException(
-            String.format("Unknown type code:  %s",
+            String.format("Unknown type code: %s",
                 Integer.valueOf(code)));
     }
   }
@@ -276,7 +276,7 @@ public class StatisticDescriptorImpl implements StatisticDescriptor {
         return Double.longBitsToDouble(bits);
       default:
         throw new RuntimeException(
-            String.format("unexpected stat descriptor type code:  %s",
+            String.format("unexpected stat descriptor type code: %s",
                 Byte.valueOf(this.typeCode)));
     }
   }
@@ -314,7 +314,7 @@ public class StatisticDescriptorImpl implements StatisticDescriptor {
     if (this.typeCode != INT) {
       throw new IllegalArgumentException(
           String.format(
-              "The statistic  %s  with id  %s  is of type  %s  and it was expected to be an int.",
+              "The statistic %s with id %s is of type %s and it was expected to be an int.",
               new Object[] {getName(), Integer.valueOf(getId()),
                   StatisticDescriptorImpl.getTypeCodeName(getTypeCode())}));
     }
@@ -339,7 +339,7 @@ public class StatisticDescriptorImpl implements StatisticDescriptor {
     if (this.typeCode != DOUBLE) {
       throw new IllegalArgumentException(
           String.format(
-              "The statistic  %s  with id  %s  is of type  %s  and it was expected to be a double.",
+              "The statistic %s with id %s is of type %s and it was expected to be a double.",
               new Object[] {getName(), Integer.valueOf(getId()),
                   StatisticDescriptorImpl.getTypeCodeName(getTypeCode())}));
     }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/statistics/StatisticsImpl.java b/geode-core/src/main/java/org/apache/geode/internal/statistics/StatisticsImpl.java
index 13a6a0d..08ba75d 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/statistics/StatisticsImpl.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/statistics/StatisticsImpl.java
@@ -556,7 +556,7 @@ public abstract class StatisticsImpl implements Statistics {
         return Double.valueOf(_getDouble(stat.getId()));
       default:
         throw new RuntimeException(
-            String.format("unexpected stat descriptor type code:  %s",
+            String.format("unexpected stat descriptor type code: %s",
                 Byte.valueOf(stat.getTypeCode())));
     }
   }
@@ -574,7 +574,7 @@ public abstract class StatisticsImpl implements Statistics {
         return Double.doubleToRawLongBits(_getDouble(stat.getId()));
       default:
         throw new RuntimeException(
-            String.format("unexpected stat descriptor type code:  %s",
+            String.format("unexpected stat descriptor type code: %s",
                 Byte.valueOf(stat.getTypeCode())));
     }
   }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/statistics/StatisticsTypeFactoryImpl.java b/geode-core/src/main/java/org/apache/geode/internal/statistics/StatisticsTypeFactoryImpl.java
index 41ade9d..0b2b513 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/statistics/StatisticsTypeFactoryImpl.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/statistics/StatisticsTypeFactoryImpl.java
@@ -71,7 +71,7 @@ public class StatisticsTypeFactoryImpl implements StatisticsTypeFactory {
         result = currentValue;
       } else {
         throw new IllegalArgumentException(
-            String.format("Statistics type named  %s  already exists.",
+            String.format("Statistics type named %s already exists.",
                 result.getName()));
       }
     }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/statistics/StatisticsTypeImpl.java b/geode-core/src/main/java/org/apache/geode/internal/statistics/StatisticsTypeImpl.java
index a73cf81..5d56e00 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/statistics/StatisticsTypeImpl.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/statistics/StatisticsTypeImpl.java
@@ -109,7 +109,7 @@ public class StatisticsTypeImpl implements StatisticsType {
     }
     if (stats.length > StatisticsTypeFactory.MAX_DESCRIPTORS_PER_TYPE) {
       throw new IllegalArgumentException(
-          String.format("The requested descriptor count  %s  exceeds the maximum which is   %s .",
+          String.format("The requested descriptor count %s exceeds the maximum which is %s .",
               new Object[] {Integer.valueOf(stats.length),
                   Integer.valueOf(StatisticsTypeFactory.MAX_DESCRIPTORS_PER_TYPE)}));
     }
@@ -142,7 +142,7 @@ public class StatisticsTypeImpl implements StatisticsType {
       Object previousValue = statsMap.put(stats[i].getName(), sd);
       if (previousValue != null) {
         throw new IllegalArgumentException(
-            String.format("Duplicate StatisticDescriptor named  %s",
+            String.format("Duplicate StatisticDescriptor named %s",
                 stats[i].getName()));
       }
     }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/statistics/StatisticsTypeXml.java b/geode-core/src/main/java/org/apache/geode/internal/statistics/StatisticsTypeXml.java
index ec2806c..d05365b 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/statistics/StatisticsTypeXml.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/statistics/StatisticsTypeXml.java
@@ -74,7 +74,7 @@ public class StatisticsTypeXml implements EntityResolver, ErrorHandler {
 
       } else {
         throw new SAXNotRecognizedException(
-            String.format("DTD not found:  %s", location));
+            String.format("DTD not found: %s", location));
       }
 
     } else {
@@ -260,7 +260,7 @@ public class StatisticsTypeXml implements EntityResolver, ErrorHandler {
         case DOUBLE_STORAGE:
           return statFactory.createDoubleCounter(statName, description, unit, largerBetter);
         default:
-          throw new RuntimeException(String.format("unexpected storage type  %s",
+          throw new RuntimeException(String.format("unexpected storage type %s",
               Integer.valueOf(storage)));
       }
     } else {
@@ -272,7 +272,7 @@ public class StatisticsTypeXml implements EntityResolver, ErrorHandler {
         case DOUBLE_STORAGE:
           return statFactory.createDoubleGauge(statName, description, unit, largerBetter);
         default:
-          throw new RuntimeException(String.format("unexpected storage type  %s",
+          throw new RuntimeException(String.format("unexpected storage type %s",
               Integer.valueOf(storage)));
       }
     }
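
The statistics-related messages cleaned up above surface through the public statistics API; a minimal sketch of defining a statistics type that could trip them (assuming a connected DistributedSystem, which acts as the StatisticsTypeFactory; all names are illustrative):

    import org.apache.geode.StatisticDescriptor;
    import org.apache.geode.StatisticsType;
    import org.apache.geode.StatisticsTypeFactory;
    import org.apache.geode.distributed.DistributedSystem;

    public class ExampleStatsType {
      static StatisticsType define(DistributedSystem ds) {
        StatisticsTypeFactory factory = ds; // a DistributedSystem is also a statistics factory
        StatisticDescriptor[] descriptors = new StatisticDescriptor[] {
            factory.createIntCounter("exampleOps", "Operations performed", "operations"),
            factory.createLongGauge("exampleQueueDepth", "Current queue depth", "entries"),
        };
        // Registering the same type name again with different descriptors raises the
        // "Statistics type named %s already exists." IllegalArgumentException shown above;
        // two descriptors sharing a name raise the "Duplicate StatisticDescriptor" one.
        return factory.createType("ExampleStats", "Statistics used for illustration", descriptors);
      }
    }
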
diff --git a/geode-core/src/main/java/org/apache/geode/internal/tcp/Connection.java b/geode-core/src/main/java/org/apache/geode/internal/tcp/Connection.java
index a00f51e..8f2e7b1 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/tcp/Connection.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/tcp/Connection.java
@@ -647,7 +647,7 @@ public class Connection implements Runnable {
 
   public static int calcHdrSize(int byteSize) {
     if (byteSize > MAX_MSG_SIZE) {
-      throw new IllegalStateException(String.format("tcp message exceeded max size of  %s",
+      throw new IllegalStateException(String.format("tcp message exceeded max size of %s",
           Integer.valueOf(MAX_MSG_SIZE)));
     }
     int hdrSize = byteSize;
@@ -664,7 +664,7 @@ public class Connection implements Runnable {
     if (ver != HANDSHAKE_VERSION) {
       throw new IOException(
           String.format(
-              "Detected wrong version of GemFire product during handshake. Expected  %s  but found  %s",
+              "Detected wrong version of GemFire product during handshake. Expected %s but found %s",
               new Object[] {new Byte(HANDSHAKE_VERSION), new Byte(ver)}));
     }
     return ver;
@@ -764,7 +764,7 @@ public class Connection implements Runnable {
                 // connection-formation. So, we need to initiate suspect processing here
                 owner.getDM().getMembershipManager().suspectMember(this.remoteAddr,
                     String.format(
-                        "Connection handshake with  %s  timed out after waiting  %s  milliseconds.",
+                        "Connection handshake with %s timed out after waiting %s milliseconds.",
 
                         peerName, Integer.valueOf(HANDSHAKE_TIMEOUT_MS)));
               } else {
@@ -773,7 +773,7 @@ public class Connection implements Runnable {
               }
               throw new ConnectionException(
                   String.format(
-                      "Connection handshake with  %s  timed out after waiting  %s  milliseconds.",
+                      "Connection handshake with %s timed out after waiting %s milliseconds.",
                       peerName, Integer.valueOf(HANDSHAKE_TIMEOUT_MS)));
             } else {
               success = this.handshakeRead;
@@ -1051,7 +1051,7 @@ public class Connection implements Runnable {
           t.getConduit().getCancelCriterion().checkCancelInProgress(null);
           if (giveUpOnMember(mgr, remoteAddr)) {
             throw new IOException(
-                String.format("Member %s  left the group", remoteAddr));
+                String.format("Member %s left the group", remoteAddr));
           }
           if (!warningPrinted) {
             warningPrinted = true;
@@ -1096,7 +1096,7 @@ public class Connection implements Runnable {
               // and the socket was closed or we were sent
               // ShutdownMessage
               if (giveUpOnMember(mgr, remoteAddr)) {
-                throw new IOException(String.format("Member %s  left the group",
+                throw new IOException(String.format("Member %s left the group",
                     remoteAddr));
               }
               t.getConduit().getCancelCriterion().checkCancelInProgress(null);
@@ -2274,7 +2274,7 @@ public class Connection implements Runnable {
               if (handshakeByte != HANDSHAKE_VERSION) {
                 throw new IllegalStateException(
                     String.format(
-                        "Detected wrong version of GemFire product during handshake. Expected  %s  but found  %s",
+                        "Detected wrong version of GemFire product during handshake. Expected %s but found %s",
 
                         new Object[] {new Byte(HANDSHAKE_VERSION), new Byte(handshakeByte)}));
               }
@@ -2498,7 +2498,7 @@ public class Connection implements Runnable {
       DistributionMessage msg) throws IOException, ConnectionException {
     if (!connected) {
       throw new ConnectionException(
-          String.format("Not connected to  %s", this.remoteAddr));
+          String.format("Not connected to %s", this.remoteAddr));
     }
     if (this.batchFlusher != null) {
       batchSend(buffer);
@@ -2707,7 +2707,7 @@ public class Connection implements Runnable {
         if (this.disconnectRequested) {
           buffer.position(origBufferPos);
           // we have given up so just drop this message.
-          throw new ConnectionException(String.format("Forced disconnect sent to  %s",
+          throw new ConnectionException(String.format("Forced disconnect sent to %s",
               this.remoteAddr));
         }
         if (!force && !this.asyncQueuingInProgress) {
@@ -3405,7 +3405,7 @@ public class Connection implements Runnable {
       } catch (Exception ex) {
       }
       throw new ConnectionException(
-          String.format("Unable to read direct ack because:  %s", e));
+          String.format("Unable to read direct ack because: %s", e));
     } catch (ConnectionException e) {
       this.owner.getConduit().getCancelCriterion().checkCancelInProgress(e);
       throw e;
@@ -3419,7 +3419,7 @@ public class Connection implements Runnable {
       } catch (Exception ex) {
       }
       throw new ConnectionException(
-          String.format("Unable to read direct ack because:  %s", e));
+          String.format("Unable to read direct ack because: %s", e));
     } finally {
       stats.incProcessedMessages(1L);
       accessed();
@@ -3729,14 +3729,14 @@ public class Connection implements Runnable {
                 if (b != 0) {
                   throw new IllegalStateException(
                       String.format(
-                          "Detected old version (pre 5.0.1) of GemFire or non-GemFire during handshake due to initial byte being  %s",
+                          "Detected old version (pre 5.0.1) of GemFire or non-GemFire during handshake due to initial byte being %s",
                           new Byte(b)));
                 }
                 byte handshakeByte = dis.readByte();
                 if (handshakeByte != HANDSHAKE_VERSION) {
                   throw new IllegalStateException(
                       String.format(
-                          "Detected wrong version of GemFire product during handshake. Expected  %s  but found  %s",
+                          "Detected wrong version of GemFire product during handshake. Expected %s but found %s",
 
                           new Object[] {new Byte(HANDSHAKE_VERSION), new Byte(handshakeByte)}));
                 }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/tcp/TCPConduit.java b/geode-core/src/main/java/org/apache/geode/internal/tcp/TCPConduit.java
index 04b03ef..18593dd 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/tcp/TCPConduit.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/tcp/TCPConduit.java
@@ -949,7 +949,7 @@ public class TCPConduit implements Runnable {
             if (conn == null) {
               // conduit may be closed - otherwise an ioexception would be thrown
               problem = new IOException(
-                  String.format("Unable to reconnect to server; possible shutdown:  %s",
+                  String.format("Unable to reconnect to server; possible shutdown: %s",
                       memberAddress));
             } else if (conn.isClosing() || !conn.getRemoteAddress().equals(memberAddress)) {
               if (logger.isDebugEnabled()) {
diff --git a/geode-core/src/main/java/org/apache/geode/management/membership/UniversalMembershipListenerAdapter.java b/geode-core/src/main/java/org/apache/geode/management/membership/UniversalMembershipListenerAdapter.java
index 4aa7ade..645b8f9 100644
--- a/geode-core/src/main/java/org/apache/geode/management/membership/UniversalMembershipListenerAdapter.java
+++ b/geode-core/src/main/java/org/apache/geode/management/membership/UniversalMembershipListenerAdapter.java
@@ -162,7 +162,7 @@ public abstract class UniversalMembershipListenerAdapter implements MembershipLi
   public UniversalMembershipListenerAdapter(int historySize) {
     if (historySize < 10) {
       throw new IllegalArgumentException(
-          String.format("Argument historySize must be between 10 and Integer.MAX_INT:  %s .",
+          String.format("Argument historySize must be between 10 and Integer.MAX_INT: %s .",
               Integer.valueOf(historySize)));
     }
     this.historySize = historySize;
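
The historySize guard touched above sits in a public constructor; a minimal sketch of how the reworded message is reached (this assumes an anonymous no-op subclass is sufficient, since the adapter supplies default callbacks):

    import org.apache.geode.management.membership.UniversalMembershipListenerAdapter;

    public class HistorySizeExample {
      public static void main(String[] args) {
        // Accepted: historySize must be at least 10.
        UniversalMembershipListenerAdapter ok = new UniversalMembershipListenerAdapter(10) {};
        try {
          // Rejected: throws IllegalArgumentException with the message cleaned up above.
          UniversalMembershipListenerAdapter bad = new UniversalMembershipListenerAdapter(5) {};
        } catch (IllegalArgumentException expected) {
          System.out.println(expected.getMessage());
        }
      }
    }
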
diff --git a/geode-cq/src/distributedTest/java/org/apache/geode/cache/query/cq/dunit/CqQueryDUnitTest.java b/geode-cq/src/distributedTest/java/org/apache/geode/cache/query/cq/dunit/CqQueryDUnitTest.java
index 44a6a7a..7d1bf9c 100644
--- a/geode-cq/src/distributedTest/java/org/apache/geode/cache/query/cq/dunit/CqQueryDUnitTest.java
+++ b/geode-cq/src/distributedTest/java/org/apache/geode/cache/query/cq/dunit/CqQueryDUnitTest.java
@@ -89,7 +89,7 @@ import org.apache.geode.test.junit.categories.ClientSubscriptionTest;
 public class CqQueryDUnitTest extends JUnit4CacheTestCase {
   private static final Logger logger = LogService.getLogger();
   /**
-   * The port on which the bridge server was started in this VM
+   * The port on which the cache server was started in this VM
    */
   private static int bridgeServerPort;
 
@@ -3221,10 +3221,10 @@ public class CqQueryDUnitTest extends JUnit4CacheTestCase {
   }
 
   /*
-   * Used only by tests that start and stop a server only to need to start the bridge server again
+   * Used only by tests that start and stop a server and then need to start the cache server again
    */
   private void restartBridgeServer(VM server, final int port) {
-    server.invoke(new CacheSerializableRunnable("Start bridge server") {
+    server.invoke(new CacheSerializableRunnable("Start cache server") {
       public void run2() {
         try {
           restartBridgeServers(getCache());
@@ -3237,7 +3237,7 @@ public class CqQueryDUnitTest extends JUnit4CacheTestCase {
 
 
   /**
-   * Starts a bridge server on the given port to serve up the given region.
+   * Starts a cache server on the given port to serve up the given region.
    *
    * @since GemFire 4.0
    */
@@ -3246,7 +3246,7 @@ public class CqQueryDUnitTest extends JUnit4CacheTestCase {
   }
 
   /**
-   * Starts a bridge server on the given port, using the given deserializeValues and
+   * Starts a cache server on the given port, using the given deserializeValues and
    * notifyBySubscription to serve up the given region.
    *
    * @since GemFire 4.0
@@ -3262,7 +3262,7 @@ public class CqQueryDUnitTest extends JUnit4CacheTestCase {
   }
 
   /**
-   * Stops the bridge server that serves up the given cache.
+   * Stops the cache server that serves up the given cache.
    *
    * @since GemFire 4.0
    */
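
The javadoc renamed above describes the test helpers that start and stop cache servers; outside the dunit harness the equivalent public-API steps are roughly the following sketch (port handling is illustrative):

    import java.io.IOException;
    import org.apache.geode.cache.Cache;
    import org.apache.geode.cache.server.CacheServer;

    public class StartCacheServerExample {
      static CacheServer startCacheServer(Cache cache, int port) throws IOException {
        CacheServer server = cache.addCacheServer();
        server.setPort(port);                 // 0 lets the server pick a free port
        server.setNotifyBySubscription(true); // push subscription events to clients
        server.start();
        return server;
      }
    }
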
diff --git a/geode-cq/src/distributedTest/java/org/apache/geode/cache/query/cq/dunit/CqQueryUsingPoolDUnitTest.java b/geode-cq/src/distributedTest/java/org/apache/geode/cache/query/cq/dunit/CqQueryUsingPoolDUnitTest.java
index 3c60590..1c1357e 100644
--- a/geode-cq/src/distributedTest/java/org/apache/geode/cache/query/cq/dunit/CqQueryUsingPoolDUnitTest.java
+++ b/geode-cq/src/distributedTest/java/org/apache/geode/cache/query/cq/dunit/CqQueryUsingPoolDUnitTest.java
@@ -96,7 +96,7 @@ import org.apache.geode.test.junit.categories.ClientSubscriptionTest;
 @Category({ClientSubscriptionTest.class})
 public class CqQueryUsingPoolDUnitTest extends JUnit4CacheTestCase {
 
-  /** The port on which the bridge server was started in this VM */
+  /** The port on which the cache server was started in this VM */
   private static int bridgeServerPort;
 
   protected static int port = 0;
@@ -235,10 +235,10 @@ public class CqQueryUsingPoolDUnitTest extends JUnit4CacheTestCase {
   }
 
   /**
-   * Create a bridge server with partitioned region.
+   * Create a cache server with partitioned region.
    *
-   * @param server VM where to create the bridge server.
-   * @param port bridge server port.
+   * @param server VM where to create the cache server.
+   * @param port cache server port.
    * @param isAccessor if true the underlying partitioned region will not host data on this vm.
    * @param redundantCopies number of redundant copies for the primary bucket.
    */
@@ -2984,7 +2984,7 @@ public class CqQueryUsingPoolDUnitTest extends JUnit4CacheTestCase {
   }
 
   /**
-   * Starts a bridge server on the given port to serve up the given region.
+   * Starts a cache server on the given port to serve up the given region.
    *
    * @since GemFire 4.0
    */
@@ -2993,7 +2993,7 @@ public class CqQueryUsingPoolDUnitTest extends JUnit4CacheTestCase {
   }
 
   /**
-   * Starts a bridge server on the given port, using the given deserializeValues and
+   * Starts a cache server on the given port, using the given deserializeValues and
    * notifyBySubscription to serve up the given region.
    *
    * @since GemFire 4.0
@@ -3009,7 +3009,7 @@ public class CqQueryUsingPoolDUnitTest extends JUnit4CacheTestCase {
   }
 
   /**
-   * Stops the bridge server that serves up the given cache.
+   * Stops the cache server that serves up the given cache.
    *
    * @since GemFire 4.0
    */
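
The isAccessor / redundantCopies parameters documented above correspond to standard partitioned-region attributes; a minimal sketch of creating such a region (region name illustrative):

    import org.apache.geode.cache.Cache;
    import org.apache.geode.cache.PartitionAttributesFactory;
    import org.apache.geode.cache.Region;
    import org.apache.geode.cache.RegionShortcut;

    public class PartitionedRegionExample {
      static Region<Object, Object> create(Cache cache, boolean isAccessor, int redundantCopies) {
        PartitionAttributesFactory<Object, Object> paf = new PartitionAttributesFactory<>();
        paf.setRedundantCopies(redundantCopies);
        if (isAccessor) {
          paf.setLocalMaxMemory(0); // accessor: hosts no bucket data on this member
        }
        return cache.<Object, Object>createRegionFactory(RegionShortcut.PARTITION)
            .setPartitionAttributes(paf.create())
            .create("exampleRegion");
      }
    }
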
diff --git a/geode-cq/src/distributedTest/java/org/apache/geode/cache/query/cq/dunit/PartitionedRegionCqQueryDUnitTest.java b/geode-cq/src/distributedTest/java/org/apache/geode/cache/query/cq/dunit/PartitionedRegionCqQueryDUnitTest.java
index 0466786..581e977 100644
--- a/geode-cq/src/distributedTest/java/org/apache/geode/cache/query/cq/dunit/PartitionedRegionCqQueryDUnitTest.java
+++ b/geode-cq/src/distributedTest/java/org/apache/geode/cache/query/cq/dunit/PartitionedRegionCqQueryDUnitTest.java
@@ -233,7 +233,7 @@ public class PartitionedRegionCqQueryDUnitTest extends JUnit4CacheTestCase {
   }
 
   /**
-   * test for registering cqs on a bridge server with local max memory zero.
+   * test for registering cqs on a cache server with local max memory zero.
    */
   @Test
   public void testPartitionedCqOnAccessorBridgeServer() throws Exception {
@@ -243,7 +243,7 @@ public class PartitionedRegionCqQueryDUnitTest extends JUnit4CacheTestCase {
     VM server2 = host.getVM(1);
     VM client = host.getVM(2);
 
-    // creating an accessor vm with Bridge Server installed.
+    // creating an accessor vm with cache server installed.
     createServer(server1, true);
 
     createServer(server2);
@@ -309,7 +309,7 @@ public class PartitionedRegionCqQueryDUnitTest extends JUnit4CacheTestCase {
   }
 
   /**
-   * test for registering cqs on single Bridge server hosting all the data. This will generate all
+   * test for registering cqs on single cache server hosting all the data. This will generate all
    * the events locally and should always have the old value and should not send the profile update
    * on wire.
    */
@@ -319,7 +319,7 @@ public class PartitionedRegionCqQueryDUnitTest extends JUnit4CacheTestCase {
     VM server1 = host.getVM(0);
     VM client = host.getVM(2);
 
-    // creating an accessor vm with Bridge Server installed.
+    // creating an accessor vm with cache server installed.
     createServer(server1);
     final int port = server1.invoke(() -> PartitionedRegionCqQueryDUnitTest.getCacheServerPort());
     final String host0 = NetworkUtils.getServerHostName(server1.getHost());
@@ -378,7 +378,7 @@ public class PartitionedRegionCqQueryDUnitTest extends JUnit4CacheTestCase {
   }
 
   /**
-   * test for registering cqs on single Bridge server hosting all the data. This will generate all
+   * test for registering cqs on single cache server hosting all the data. This will generate all
    * the events locally but the puts, updates and destroys originate at an accessor vm.
    */
   @Test
@@ -388,7 +388,7 @@ public class PartitionedRegionCqQueryDUnitTest extends JUnit4CacheTestCase {
     VM server2 = host.getVM(1);
     VM client = host.getVM(2);
 
-    // creating an accessor vm with Bridge Server installed.
+    // creating an accessor vm with cache server installed.
     createServer(server1, true);
 
     assertLocalMaxMemory(server1);
@@ -455,7 +455,7 @@ public class PartitionedRegionCqQueryDUnitTest extends JUnit4CacheTestCase {
   }
 
   /**
-   * test to check invalidates on bridge server hosting datastores as well.
+   * test to check invalidates on cache server hosting datastores as well.
    */
   @Test
   public void testPRCqWithInvalidatesOnBridgeServer() {
@@ -464,8 +464,8 @@ public class PartitionedRegionCqQueryDUnitTest extends JUnit4CacheTestCase {
     VM server2 = host.getVM(1);
     VM client = host.getVM(2);
 
-    // creating Bridge Server with data store. clients will connect to this
-    // bridge server.
+    // creating cache server with data store. clients will connect to this
+    // cache server.
     createServer(server1);
 
     // create another server with data store.
@@ -529,7 +529,7 @@ public class PartitionedRegionCqQueryDUnitTest extends JUnit4CacheTestCase {
   }
 
   /**
-   * test cqs with invalidates on bridge server not hosting datastores.
+   * test cqs with invalidates on cache server not hosting datastores.
    */
   @Test
   public void testPRCqWithInvalidatesOnAccessorBridgeServer() throws Exception {
@@ -538,8 +538,8 @@ public class PartitionedRegionCqQueryDUnitTest extends JUnit4CacheTestCase {
     VM server2 = host.getVM(1);
     VM client = host.getVM(2);
 
-    // creating Bridge Server with data store. clients will connect to this
-    // bridge server.
+    // creating cache server with data store. clients will connect to this
+    // cache server.
     createServer(server1, true);
 
     // create another server with data store.
@@ -602,7 +602,7 @@ public class PartitionedRegionCqQueryDUnitTest extends JUnit4CacheTestCase {
   }
 
   /**
-   * test cqs with create updates and destroys from client on bridge server hosting datastores.
+   * test cqs with create updates and destroys from client on cache server hosting datastores.
    */
   @Test
   public void testPRCqWithUpdatesFromClients() throws Exception {
@@ -612,8 +612,8 @@ public class PartitionedRegionCqQueryDUnitTest extends JUnit4CacheTestCase {
     VM client = host.getVM(2);
     VM client2 = host.getVM(3);
 
-    // creating Bridge Server with data store. clients will connect to this
-    // bridge server.
+    // creating cache server with data store. clients will connect to this
+    // cache server.
     createServer(server1, false, 1);
 
     // create another server with data store.
@@ -680,7 +680,7 @@ public class PartitionedRegionCqQueryDUnitTest extends JUnit4CacheTestCase {
   }
 
   /**
-   * test cqs on multiple partitioned region hosted by bridge servers.
+   * test cqs on multiple partitioned region hosted by cache servers.
    */
   @Test
   public void testPRCqWithMultipleRegionsOnServer() throws Exception {
@@ -690,8 +690,8 @@ public class PartitionedRegionCqQueryDUnitTest extends JUnit4CacheTestCase {
     VM client = host.getVM(2);
     VM client2 = host.getVM(3);
 
-    // creating Bridge Server with data store. clients will connect to this
-    // bridge server.
+    // creating cache server with data store. clients will connect to this
+    // cache server.
     createServer(server1, false, 1);
 
     // create another server with data store.
@@ -780,7 +780,7 @@ public class PartitionedRegionCqQueryDUnitTest extends JUnit4CacheTestCase {
   }
 
   /**
-   * tests multiple cqs on partitioned region on bridge servers with profile update for not
+   * tests multiple cqs on partitioned region on cache servers with profile update for not
    * requiring old values.
    */
   @Test
@@ -791,8 +791,8 @@ public class PartitionedRegionCqQueryDUnitTest extends JUnit4CacheTestCase {
     VM client = host.getVM(2);
     VM client2 = host.getVM(3);
 
-    // creating Bridge Server with data store. clients will connect to this
-    // bridge server.
+    // creating cache server with data store. clients will connect to this
+    // cache server.
     createServer(server1, false, 1);
 
     // create another server with data store.
@@ -1106,7 +1106,7 @@ public class PartitionedRegionCqQueryDUnitTest extends JUnit4CacheTestCase {
   }
 
   /**
-   * create bridge server with default attributes for partitioned region.
+   * create cache server with default attributes for partitioned region.
    */
   public void createServer(VM server) {
     createServer(server, 0, false, 0);
@@ -1115,8 +1115,8 @@ public class PartitionedRegionCqQueryDUnitTest extends JUnit4CacheTestCase {
   /**
    * create accessor vm if the given accessor parameter variable is true.
    *
-   * @param server VM to create bridge server.
-   * @param accessor boolean if true creates an accessor bridge server.
+   * @param server VM to create cache server.
+   * @param accessor boolean if true creates an accessor cache server.
    */
   public void createServer(VM server, boolean accessor) {
     createServer(server, 0, accessor, 0);
@@ -1125,7 +1125,7 @@ public class PartitionedRegionCqQueryDUnitTest extends JUnit4CacheTestCase {
   /**
    * create server with partitioned region with redundant copies.
    *
-   * @param server VM where to create the bridge server.
+   * @param server VM where to create the cache server.
    * @param accessor boolean if true create partitioned region with local max memory zero.
    * @param redundantCopies number of redundant copies for a partitioned region.
    */
@@ -1134,10 +1134,10 @@ public class PartitionedRegionCqQueryDUnitTest extends JUnit4CacheTestCase {
   }
 
   /**
-   * Create a bridge server with partitioned region.
+   * Create a cache server with partitioned region.
    *
-   * @param server VM where to create the bridge server.
-   * @param port bridge server port.
+   * @param server VM where to create the cache server.
+   * @param port cache server port.
    * @param isAccessor if true the underlying partitioned region will not host data on this vm.
    * @param redundantCopies number of redundant copies for the primary bucket.
    */
@@ -1174,10 +1174,10 @@ public class PartitionedRegionCqQueryDUnitTest extends JUnit4CacheTestCase {
   }
 
   /**
-   * Create a bridge server with partitioned region.
+   * Create a cache server with partitioned region.
    *
-   * @param server VM where to create the bridge server.
-   * @param port bridge server port.
+   * @param server VM where to create the cache server.
+   * @param port cache server port.
    * @param isAccessor if true the underlying partitioned region will not host data on this vm.
    * @param redundantCopies number of redundant copies for the primary bucket.
    */
@@ -1219,7 +1219,7 @@ public class PartitionedRegionCqQueryDUnitTest extends JUnit4CacheTestCase {
   }
 
   /**
-   * Starts a bridge server on the given port, using the given deserializeValues and
+   * Starts a cache server on the given port, using the given deserializeValues and
    * notifyBySubscription to serve up the given region.
    *
    * @since GemFire 5.5
diff --git a/geode-cq/src/distributedTest/java/org/apache/geode/cache/query/cq/dunit/PrCqUsingPoolDUnitTest.java b/geode-cq/src/distributedTest/java/org/apache/geode/cache/query/cq/dunit/PrCqUsingPoolDUnitTest.java
index fc7d640..d3f63c0 100644
--- a/geode-cq/src/distributedTest/java/org/apache/geode/cache/query/cq/dunit/PrCqUsingPoolDUnitTest.java
+++ b/geode-cq/src/distributedTest/java/org/apache/geode/cache/query/cq/dunit/PrCqUsingPoolDUnitTest.java
@@ -205,7 +205,7 @@ public class PrCqUsingPoolDUnitTest extends JUnit4CacheTestCase {
   }
 
   /**
-   * test for registering cqs on a bridge server with local max memory zero.
+   * test for registering cqs on a cache server with local max memory zero.
    */
   @Test
   public void testPartitionedCqOnAccessorBridgeServer() throws Exception {
@@ -215,7 +215,7 @@ public class PrCqUsingPoolDUnitTest extends JUnit4CacheTestCase {
     VM server2 = host.getVM(1);
     VM client = host.getVM(2);
 
-    // creating an accessor vm with Bridge Server installed.
+    // creating an accessor vm with cache server installed.
     createServer(server1, true);
 
     createServer(server2);
@@ -290,7 +290,7 @@ public class PrCqUsingPoolDUnitTest extends JUnit4CacheTestCase {
   }
 
   /**
-   * Test for registering cqs on a bridge server with local max memory zero.
+   * Test for registering cqs on a cache server with local max memory zero.
    */
   @Test
   public void testCqOnAccessorServerWithUpdatesResultingInDestroyedCQEvents() throws Exception {
@@ -300,7 +300,7 @@ public class PrCqUsingPoolDUnitTest extends JUnit4CacheTestCase {
     VM server2 = host.getVM(1);
     VM client = host.getVM(2);
 
-    // creating an accessor vm with Bridge Server installed.
+    // creating an accessor vm with cache server installed.
     createServer(server1, true);
 
     createServer(server2);
@@ -345,7 +345,7 @@ public class PrCqUsingPoolDUnitTest extends JUnit4CacheTestCase {
 
 
   /**
-   * test for registering cqs on single Bridge server hosting all the data. This will generate all
+   * test for registering cqs on single cache server hosting all the data. This will generate all
    * the events locally and should always have the old value and should not send the profile update
    * on wire.
    */
@@ -356,7 +356,7 @@ public class PrCqUsingPoolDUnitTest extends JUnit4CacheTestCase {
     // VM server2 = host.getVM(1);
     VM client = host.getVM(2);
 
-    // creating an accessor vm with Bridge Server installed.
+    // creating an accessor vm with cache server installed.
     createServer(server1);
     final int port = server1.invoke(() -> PrCqUsingPoolDUnitTest.getCacheServerPort());
     final String host0 = NetworkUtils.getServerHostName(server1.getHost());
@@ -425,7 +425,7 @@ public class PrCqUsingPoolDUnitTest extends JUnit4CacheTestCase {
   }
 
   /**
-   * test for registering cqs on single Bridge server hosting all the data. This will generate all
+   * test for registering cqs on single cache server hosting all the data. This will generate all
    * the events locally but the puts, updates and destroys originate at an accessor vm.
    */
   @Test
@@ -435,7 +435,7 @@ public class PrCqUsingPoolDUnitTest extends JUnit4CacheTestCase {
     VM server2 = host.getVM(1);
     VM client = host.getVM(2);
 
-    // creating an accessor vm with Bridge Server installed.
+    // creating an accessor vm with cache server installed.
     createServer(server1, true);
 
     assertLocalMaxMemory(server1);
@@ -512,7 +512,7 @@ public class PrCqUsingPoolDUnitTest extends JUnit4CacheTestCase {
   }
 
   /**
-   * test to check invalidates on bridge server hosting datastores as well.
+   * test to check invalidates on cache server hosting datastores as well.
    */
   @Test
   public void testPRCqWithInvalidatesOnBridgeServer() {
@@ -521,8 +521,8 @@ public class PrCqUsingPoolDUnitTest extends JUnit4CacheTestCase {
     VM server2 = host.getVM(1);
     VM client = host.getVM(2);
 
-    // creating Bridge Server with data store. clients will connect to this
-    // bridge server.
+    // creating cache server with data store. clients will connect to this
+    // cache server.
     createServer(server1);
 
     // create another server with data store.
@@ -597,7 +597,7 @@ public class PrCqUsingPoolDUnitTest extends JUnit4CacheTestCase {
   }
 
   /**
-   * test cqs with invalidates on bridge server not hosting datastores.
+   * test cqs with invalidates on cache server not hosting datastores.
    *
    */
   @Test
@@ -608,8 +608,8 @@ public class PrCqUsingPoolDUnitTest extends JUnit4CacheTestCase {
     VM server2 = host.getVM(1);
     VM client = host.getVM(2);
 
-    // creating Bridge Server with data store. clients will connect to this
-    // bridge server.
+    // creating cache server with data store. clients will connect to this
+    // cache server.
     createServer(server1, true);
 
     // create another server with data store.
@@ -684,7 +684,7 @@ public class PrCqUsingPoolDUnitTest extends JUnit4CacheTestCase {
   }
 
   /**
-   * test cqs with create updates and destroys from client on bridge server hosting datastores.
+   * test cqs with create updates and destroys from client on cache server hosting datastores.
    */
   @Test
   public void testPRCqWithUpdatesFromClients() throws Exception {
@@ -695,8 +695,8 @@ public class PrCqUsingPoolDUnitTest extends JUnit4CacheTestCase {
     VM client = host.getVM(2);
     VM client2 = host.getVM(3);
 
-    // creating Bridge Server with data store. clients will connect to this
-    // bridge server.
+    // creating cache server with data store. clients will connect to this
+    // cache server.
     createServer(server1, false, 1);
 
     // create another server with data store.
@@ -777,7 +777,7 @@ public class PrCqUsingPoolDUnitTest extends JUnit4CacheTestCase {
   }
 
   /**
-   * test cqs on multiple partitioned region hosted by bridge servers.
+   * test cqs on multiple partitioned region hosted by cache servers.
    *
    */
   @Test
@@ -789,8 +789,8 @@ public class PrCqUsingPoolDUnitTest extends JUnit4CacheTestCase {
     VM client = host.getVM(2);
     VM client2 = host.getVM(3);
 
-    // creating Bridge Server with data store. clients will connect to this
-    // bridge server.
+    // creating cache server with data store. clients will connect to this
+    // cache server.
     createServer(server1, false, 1);
 
     // create another server with data store.
@@ -895,7 +895,7 @@ public class PrCqUsingPoolDUnitTest extends JUnit4CacheTestCase {
   }
 
   /**
-   * tests multiple cqs on partitioned region on bridge servers with profile update for not
+   * tests multiple cqs on partitioned region on cache servers with profile update for not
    * requiring old values.
    *
    */
@@ -908,8 +908,8 @@ public class PrCqUsingPoolDUnitTest extends JUnit4CacheTestCase {
     VM client = host.getVM(2);
     VM client2 = host.getVM(3);
 
-    // creating Bridge Server with data store. clients will connect to this
-    // bridge server.
+    // creating cache server with data store. clients will connect to this
+    // cache server.
     createServer(server1, false, 1);
 
     // create another server with data store.
@@ -1202,8 +1202,8 @@ public class PrCqUsingPoolDUnitTest extends JUnit4CacheTestCase {
     VM server2 = host.getVM(1);
     VM client = host.getVM(2);
 
-    // creating Bridge Server with data store. clients will connect to this
-    // bridge server.
+    // creating cache server with data store. clients will connect to this
+    // cache server.
     createServer(server1, false, 1);
 
     // create another server with data store.
@@ -1283,8 +1283,8 @@ public class PrCqUsingPoolDUnitTest extends JUnit4CacheTestCase {
     VM server2 = host.getVM(1);
     VM client = host.getVM(2);
 
-    // creating Bridge Server with data store. clients will connect to this
-    // bridge server.
+    // creating cache server with data store. clients will connect to this
+    // cache server.
     createServer(server1, false, 1);
 
     // create another server with data store.
@@ -1374,8 +1374,8 @@ public class PrCqUsingPoolDUnitTest extends JUnit4CacheTestCase {
     VM server2 = host.getVM(1);
     VM client = host.getVM(2);
 
-    // creating Bridge Server with data store. clients will connect to this
-    // bridge server.
+    // creating cache server with data store. clients will connect to this
+    // cache server.
     createServer(server1, false, 1);
 
     // create another server with data store.
@@ -1483,8 +1483,8 @@ public class PrCqUsingPoolDUnitTest extends JUnit4CacheTestCase {
     VM server2 = host.getVM(1);
     VM client = host.getVM(2);
 
-    // creating Bridge Server with data store. clients will connect to this
-    // bridge server.
+    // creating cache server with data store. clients will connect to this
+    // cache server.
     createServer(server1, false, 1);
 
     // create another server with data store.
@@ -1597,7 +1597,7 @@ public class PrCqUsingPoolDUnitTest extends JUnit4CacheTestCase {
   // helper methods.
 
   /**
-   * create bridge server with default attributes for partitioned region.
+   * create cache server with default attributes for partitioned region.
    */
   public void createServer(VM server) {
     createServer(server, 0, false, 0);
@@ -1606,8 +1606,8 @@ public class PrCqUsingPoolDUnitTest extends JUnit4CacheTestCase {
   /**
    * create accessor vm if the given accessor parameter variable is true.
    *
-   * @param server VM to create bridge server.
-   * @param accessor boolean if true creates an accessor bridge server.
+   * @param server VM to create cache server.
+   * @param accessor boolean if true creates an accessor cache server.
    */
   public void createServer(VM server, boolean accessor) {
     createServer(server, 0, accessor, 0);
@@ -1616,7 +1616,7 @@ public class PrCqUsingPoolDUnitTest extends JUnit4CacheTestCase {
   /**
    * create server with partitioned region with redundant copies.
    *
-   * @param server VM where to create the bridge server.
+   * @param server VM where to create the cache server.
    * @param accessor boolean if true create partitioned region with local max memory zero.
    * @param redundantCopies number of redundant copies for a partitioned region.
    */
@@ -1625,10 +1625,10 @@ public class PrCqUsingPoolDUnitTest extends JUnit4CacheTestCase {
   }
 
   /**
-   * Create a bridge server with partitioned region.
+   * Create a cache server with partitioned region.
    *
-   * @param server VM where to create the bridge server.
-   * @param port bridge server port.
+   * @param server VM where to create the cache server.
+   * @param port cache server port.
    * @param isAccessor if true the underlying partitioned region will not host data on this vm.
    * @param redundantCopies number of redundant copies for the primary bucket.
    */
@@ -1674,7 +1674,7 @@ public class PrCqUsingPoolDUnitTest extends JUnit4CacheTestCase {
   }
 
   /**
-   * Starts a bridge server on the given port, using the given deserializeValues and
+   * Starts a cache server on the given port, using the given deserializeValues and
    * notifyBySubscription to serve up the given region.
    *
    * @since GemFire 5.5
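
Both pool-based CQ test classes above exercise the same client-side registration path; a minimal sketch of registering a continuous query through a pool (pool name and query string are illustrative):

    import org.apache.geode.cache.client.Pool;
    import org.apache.geode.cache.client.PoolManager;
    import org.apache.geode.cache.query.CqAttributes;
    import org.apache.geode.cache.query.CqAttributesFactory;
    import org.apache.geode.cache.query.CqQuery;
    import org.apache.geode.cache.query.QueryService;

    public class CqRegistrationExample {
      static CqQuery registerCq(String poolName) throws Exception {
        Pool pool = PoolManager.find(poolName);
        QueryService queryService = pool.getQueryService();
        CqAttributesFactory caf = new CqAttributesFactory();
        // A real test adds a CqListener here via caf.addCqListener(...)
        CqAttributes cqAttributes = caf.create();
        CqQuery cq = queryService.newCq("exampleCq",
            "SELECT * FROM /exampleRegion p WHERE p.id > 5", cqAttributes);
        cq.execute(); // or executeWithInitialResults() to also fetch current matches
        return cq;
      }
    }
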
diff --git a/geode-cq/src/distributedTest/java/org/apache/geode/cache/query/dunit/PdxQueryCQDUnitTest.java b/geode-cq/src/distributedTest/java/org/apache/geode/cache/query/dunit/PdxQueryCQDUnitTest.java
index e7523d9..b44972c 100644
--- a/geode-cq/src/distributedTest/java/org/apache/geode/cache/query/dunit/PdxQueryCQDUnitTest.java
+++ b/geode-cq/src/distributedTest/java/org/apache/geode/cache/query/dunit/PdxQueryCQDUnitTest.java
@@ -72,7 +72,7 @@ public class PdxQueryCQDUnitTest extends PdxQueryCQTestBase {
     final int queryLimit = 6; // where id > 5 (0-5)
 
     // Start server1
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer();
         Region region = getRootRegion().getSubregion(regionName);
@@ -83,7 +83,7 @@ public class PdxQueryCQDUnitTest extends PdxQueryCQTestBase {
     });
 
     // Start server2
-    vm1.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm1.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer();
         Region region = getRootRegion().getSubregion(regionName);
@@ -147,14 +147,14 @@ public class PdxQueryCQDUnitTest extends PdxQueryCQTestBase {
 
     // Check for TestObject instances on Server2.
     // It should be 0
-    vm1.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm1.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         assertEquals(0, TestObject.numInstance);
       }
     });
 
     // update
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         Region region = getRootRegion().getSubregion(regionName);
         for (int i = 0; i < numberOfEntries * 2; i++) {
@@ -167,7 +167,7 @@ public class PdxQueryCQDUnitTest extends PdxQueryCQTestBase {
 
     // Check for TestObject instances on Server2.
     // It should be 0
-    vm1.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm1.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         assertEquals(0, TestObject.numInstance);
       }
@@ -250,7 +250,7 @@ public class PdxQueryCQDUnitTest extends PdxQueryCQTestBase {
             "SELECT * FROM " + regName + " WHERE id > 5",};
 
     // Start server1
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer(false, true);
         Region region = getRootRegion().getSubregion(regionName);
@@ -258,7 +258,7 @@ public class PdxQueryCQDUnitTest extends PdxQueryCQTestBase {
     });
 
     // Start server2
-    vm1.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm1.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer(false, true);
         Region region = getRootRegion().getSubregion(regionName);
@@ -362,13 +362,13 @@ public class PdxQueryCQDUnitTest extends PdxQueryCQTestBase {
 
     // Check for TestObject instances on Server2.
     // It should be 0
-    vm1.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm1.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         assertEquals(0, TestObject.numInstance);
       }
     });
 
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         Region region = getRootRegion().getSubregion(regionName);
         // Check for TestObject instances.
@@ -404,7 +404,7 @@ public class PdxQueryCQDUnitTest extends PdxQueryCQTestBase {
     }
 
 
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         Region region = getRootRegion().getSubregion(regionName);
         // Check for TestObject instances.
@@ -414,7 +414,7 @@ public class PdxQueryCQDUnitTest extends PdxQueryCQTestBase {
 
     // Check for TestObject instances on Server2.
     // It should be 0
-    vm1.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm1.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         assertEquals(0, TestObject.numInstance);
       }
@@ -446,7 +446,7 @@ public class PdxQueryCQDUnitTest extends PdxQueryCQTestBase {
             "SELECT * FROM " + regName + " WHERE id > 5",};
 
     // Start server1
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer(false, true);
         Region region = getRootRegion().getSubregion(regionName);
@@ -454,7 +454,7 @@ public class PdxQueryCQDUnitTest extends PdxQueryCQTestBase {
     });
 
     // Start server2
-    vm1.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm1.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer(false, true);
         Region region = getRootRegion().getSubregion(regionName);
@@ -462,7 +462,7 @@ public class PdxQueryCQDUnitTest extends PdxQueryCQTestBase {
     });
 
     // Start server3
-    vm2.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm2.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         configAndStartBridgeServer(false, true);
         Region region = getRootRegion().getSubregion(regionName);
@@ -554,13 +554,13 @@ public class PdxQueryCQDUnitTest extends PdxQueryCQTestBase {
 
     // Check for TestObject instances on Server2.
     // It should be 0
-    vm1.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm1.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         assertEquals(0, TestObject.numInstance);
       }
     });
 
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         Region region = getRootRegion().getSubregion(regionName);
         // Check for TestObject instances.
@@ -596,7 +596,7 @@ public class PdxQueryCQDUnitTest extends PdxQueryCQTestBase {
     }
 
 
-    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm0.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         Region region = getRootRegion().getSubregion(regionName);
         // Check for TestObject instances.
@@ -604,7 +604,7 @@ public class PdxQueryCQDUnitTest extends PdxQueryCQTestBase {
       }
     });
 
-    vm1.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm1.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         assertEquals(0, TestObject.numInstance);
       }
@@ -645,7 +645,7 @@ public class PdxQueryCQDUnitTest extends PdxQueryCQTestBase {
 
     // Check for TestObject instances on Server3.
     // It should be 0
-    vm2.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+    vm2.invoke(new CacheSerializableRunnable("Create cache server") {
       public void run2() throws CacheException {
         assertEquals(0, TestObject.numInstance);
       }
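
The repeated assertEquals(0, TestObject.numInstance) checks above depend on servers keeping PDX values in serialized form while queries run; on the client side, whether results come back as domain objects or PdxInstances is controlled by the read-serialized flag, sketched here (locator endpoint illustrative):

    import org.apache.geode.cache.client.ClientCache;
    import org.apache.geode.cache.client.ClientCacheFactory;

    public class PdxReadSerializedExample {
      static ClientCache connect() {
        return new ClientCacheFactory()
            .addPoolLocator("localhost", 10334) // illustrative locator host and port
            .setPdxReadSerialized(true)         // keep query and CQ results as PdxInstance
            .create();
      }
    }
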
diff --git a/geode-cq/src/distributedTest/java/org/apache/geode/cache/query/dunit/PdxQueryCQTestBase.java b/geode-cq/src/distributedTest/java/org/apache/geode/cache/query/dunit/PdxQueryCQTestBase.java
index ea81032..86a395f 100755
--- a/geode-cq/src/distributedTest/java/org/apache/geode/cache/query/dunit/PdxQueryCQTestBase.java
+++ b/geode-cq/src/distributedTest/java/org/apache/geode/cache/query/dunit/PdxQueryCQTestBase.java
@@ -60,7 +60,7 @@ import org.apache.geode.test.dunit.cache.internal.JUnit4CacheTestCase;
 
 public abstract class PdxQueryCQTestBase extends JUnit4CacheTestCase {
 
-  /** The port on which the bridge server was started in this VM */
+  /** The port on which the cache server was started in this VM */
   private static int bridgeServerPort;
   protected static final Compressor compressor = SnappyCompressor.getDefaultInstance();
   protected final String rootRegionName = "root";
@@ -92,7 +92,7 @@ public abstract class PdxQueryCQTestBase extends JUnit4CacheTestCase {
     final Host host = Host.getHost(0);
     for (int i = 0; i < 4; i++) {
       VM vm = host.getVM(i);
-      vm.invoke(new CacheSerializableRunnable("Create Bridge Server") {
+      vm.invoke(new CacheSerializableRunnable("Create cache server") {
         public void run2() throws CacheException {
           TestObject.numInstance = 0;
           PortfolioPdx.numInstance = 0;
@@ -270,7 +270,7 @@ public abstract class PdxQueryCQTestBase extends JUnit4CacheTestCase {
   }
 
   /**
-   * Starts a bridge server on the given port, using the given deserializeValues and
+   * Starts a cache server on the given port, using the given deserializeValues and
    * notifyBySubscription to serve up the given region.
    */
   protected void startBridgeServer(int port, boolean notifyBySubscription) throws IOException {
@@ -284,7 +284,7 @@ public abstract class PdxQueryCQTestBase extends JUnit4CacheTestCase {
   }
 
   /**
-   * Stops the bridge server that serves up the given cache.
+   * Stops the cache server that serves up the given cache.
    */
   protected void stopBridgeServer(Cache cache) {
     CacheServer bridge = (CacheServer) cache.getCacheServers().iterator().next();
@@ -471,7 +471,7 @@ public abstract class PdxQueryCQTestBase extends JUnit4CacheTestCase {
   }
 
   /**
-   * Starts a bridge server on the given port, using the given deserializeValues and
+   * Starts a cache server on the given port, using the given deserializeValues and
    * notifyBySubscription to serve up the given region.
    */
   protected void startCacheServer(int port, boolean notifyBySubscription) throws IOException {
diff --git a/geode-cq/src/distributedTest/java/org/apache/geode/cache/query/dunit/QueryIndexUpdateRIDUnitTest.java b/geode-cq/src/distributedTest/java/org/apache/geode/cache/query/dunit/QueryIndexUpdateRIDUnitTest.java
index 57b7486..3d3f4c4 100644
--- a/geode-cq/src/distributedTest/java/org/apache/geode/cache/query/dunit/QueryIndexUpdateRIDUnitTest.java
+++ b/geode-cq/src/distributedTest/java/org/apache/geode/cache/query/dunit/QueryIndexUpdateRIDUnitTest.java
@@ -64,7 +64,7 @@ import org.apache.geode.test.junit.categories.OQLIndexTest;
 @Category({OQLIndexTest.class})
 public class QueryIndexUpdateRIDUnitTest extends JUnit4CacheTestCase {
 
-  /** The port on which the bridge server was started in this VM */
+  /** The port on which the cache server was started in this VM */
   private static int bridgeServerPort;
 
   private String region = "regionA";
@@ -596,7 +596,7 @@ public class QueryIndexUpdateRIDUnitTest extends JUnit4CacheTestCase {
   }
 
   /**
-   * Starts a bridge server on the given port, using the given deserializeValues and
+   * Starts a cache server on the given port, using the given deserializeValues and
    * notifyBySubscription to serve up the given region.
    *
    * @since GemFire 6.6
diff --git a/geode-cq/src/distributedTest/java/org/apache/geode/internal/cache/PRDeltaPropagationDUnitTest.java b/geode-cq/src/distributedTest/java/org/apache/geode/internal/cache/PRDeltaPropagationDUnitTest.java
index cad3e10..f196c58 100755
--- a/geode-cq/src/distributedTest/java/org/apache/geode/internal/cache/PRDeltaPropagationDUnitTest.java
+++ b/geode-cq/src/distributedTest/java/org/apache/geode/internal/cache/PRDeltaPropagationDUnitTest.java
@@ -307,7 +307,7 @@ public class PRDeltaPropagationDUnitTest extends DistributedTestCase {
   }
 
   /**
-   * 1) Put delta objects on client feeder connected PR accessor bridge server. 2) From accessor to
+   * 1) Put delta objects on client feeder connected PR accessor cache server. 2) From accessor to
    * data store delta gets propagated as part of <code>PutMessage</code> delta. 3) From data store
    * to client delta should get propagated.
    */
@@ -326,7 +326,7 @@ public class PRDeltaPropagationDUnitTest extends DistributedTestCase {
   }
 
   /**
-   * 1) Put delta objects on client feeder connected PR accessor bridge server. 2) From accessor to
+   * 1) Put delta objects on client feeder connected PR accessor cache server. 2) From accessor to
    * data store delta gets propagated as part of <code>PutMessage</code> delta. 3) Exception occurs
    * when applying delta on datastore node. This invalid delta exception propagated back to client
    * through accessor. 4) Client sends full object in response.
@@ -398,7 +398,7 @@ public class PRDeltaPropagationDUnitTest extends DistributedTestCase {
   }
 
   /**
-   * 1) Put delta objects on client feeder connected accessor bridge server. 2) From accessor to
+   * 1) Put delta objects on client feeder connected accessor cache server. 2) From accessor to
    * data store delta gets propagated as part of <code>UpdateMessage</code> delta. 3) Exception
    * occurs when applying delta on datastore node. This invalid delta exception propagated back to
    * client through accessor. 4) Client sends full object in response.
@@ -437,7 +437,7 @@ public class PRDeltaPropagationDUnitTest extends DistributedTestCase {
   }
 
   /**
-   * 1) Put delta objects on feeder connected accessor bridge server. 2) Second client attached to
+   * 1) Put delta objects on feeder connected accessor cache server. 2) Second client attached to
    * datastore. Register CQ. 3) Verifies that no data is lost and the event is received on the second client
    */
   @Test
@@ -519,7 +519,7 @@ public class PRDeltaPropagationDUnitTest extends DistributedTestCase {
   }
 
   /**
-   * Topology: PR: Accessor,DataStore,Bridge server; configured for 2 buckets and redundancy 1
+   * Topology: PR: Accessor,DataStore,cache server; configured for 2 buckets and redundancy 1
    * DataStore has primary while BridgeServer has secondary of bucket. client connects to PR
    * Accessor client1 connects to PR BridgeServer client1 registers CQ client puts delta objects on
    * accessor Verify on client1 that queryUpdate and queryDestroy are executed properly
@@ -564,7 +564,7 @@ public class PRDeltaPropagationDUnitTest extends DistributedTestCase {
   }
 
   /**
-   * Topology: PR: Accessor,DataStore,Bridge server; configured for 2 buckets and redundancy 1
+   * Topology: PR: Accessor,DataStore,cache server; configured for 2 buckets and redundancy 1
    * DataStore has primary while BridgeServer has secondary of bucket. client connects to PR
    * Accessor client1 connects to PR BridgeServer client1 registers Interest as well as CQ client
    * puts delta objects on accessor Verify that client1 receives 2 deltas for 2 updates (due to RI)
@@ -609,7 +609,7 @@ public class PRDeltaPropagationDUnitTest extends DistributedTestCase {
   }
 
   /**
-   * 1) Put delta objects on client feeder connected to PR accessor bridge server. 2) From accessor
+   * 1) Put delta objects on client feeder connected to PR accessor cache server. 2) From accessor
    * to data store delta gets propagated as part of <code>PutMessage</code> delta. 3) From data
    * store to accessor delta + full value gets propagated as part of Adjunct Message. 4) From
    * accessor to client delta should get propagated.
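
The delta-propagation scenarios described above assume region values implement Geode's Delta contract; a minimal sketch of such a value (field and class names illustrative; a real value would also need to be serializable for full-value distribution):

    import java.io.DataInput;
    import java.io.DataOutput;
    import java.io.IOException;
    import org.apache.geode.Delta;
    import org.apache.geode.InvalidDeltaException;

    public class DeltaCounter implements Delta {
      private int count;
      private transient boolean changed;

      public void increment() {
        count++;
        changed = true;
      }

      @Override
      public boolean hasDelta() {
        return changed;
      }

      @Override
      public void toDelta(DataOutput out) throws IOException {
        out.writeInt(count); // ship only the changed state
        changed = false;
      }

      @Override
      public void fromDelta(DataInput in) throws IOException, InvalidDeltaException {
        count = in.readInt(); // apply the shipped state on the receiving member
      }
    }
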
diff --git a/geode-cq/src/distributedTest/java/org/apache/geode/internal/cache/PutAllCSDUnitTest.java b/geode-cq/src/distributedTest/java/org/apache/geode/internal/cache/PutAllCSDUnitTest.java
index 21f5cd2..96c951a 100755
--- a/geode-cq/src/distributedTest/java/org/apache/geode/internal/cache/PutAllCSDUnitTest.java
+++ b/geode-cq/src/distributedTest/java/org/apache/geode/internal/cache/PutAllCSDUnitTest.java
@@ -259,7 +259,7 @@ public class PutAllCSDUnitTest extends ClientServerTestCase {
           }
         });
 
-    server.invoke(new CacheSerializableRunnable(title + "verify Bridge Server") {
+    server.invoke(new CacheSerializableRunnable(title + "verify cache server") {
       @Override
       public void run2() throws CacheException {
         Region region = getRootRegion().getSubregion(regionName);
@@ -539,8 +539,8 @@ public class PutAllCSDUnitTest extends ClientServerTestCase {
       }
     });
 
-    // verify bridge server 1, its data are from client
-    server1.invoke(new CacheSerializableRunnable(title + "verify Bridge Server 1") {
+    // verify cache server 1, its data are from client
+    server1.invoke(new CacheSerializableRunnable(title + "verify cache server 1") {
       @Override
       public void run2() throws CacheException {
         Region region = getRootRegion().getSubregion(regionName);
@@ -627,8 +627,8 @@ public class PutAllCSDUnitTest extends ClientServerTestCase {
       }
     });
 
-    // verify bridge server 1, its data are from client
-    server1.invoke(new CacheSerializableRunnable(title + "verify Bridge Server 1") {
+    // verify cache server 1, its data are from client
+    server1.invoke(new CacheSerializableRunnable(title + "verify cache server 1") {
       @Override
       public void run2() throws CacheException {
         Region region = getRootRegion().getSubregion(regionName);
@@ -641,8 +641,8 @@ public class PutAllCSDUnitTest extends ClientServerTestCase {
       }
     });
 
-    // verify bridge server 2, because its data are from distribution
-    server2.invoke(new CacheSerializableRunnable(title + "verify Bridge Server 2") {
+    // verify cache server 2, because its data are from distribution
+    server2.invoke(new CacheSerializableRunnable(title + "verify cache server 2") {
       @Override
       public void run2() throws CacheException {
         Region region = getRootRegion().getSubregion(regionName);
@@ -677,8 +677,8 @@ public class PutAllCSDUnitTest extends ClientServerTestCase {
       }
     });
 
-    // verify bridge server 1, its data are from client
-    server1.invoke(new CacheSerializableRunnable(title + "verify removeAll Bridge Server 1") {
+    // verify cache server 1, its data are from client
+    server1.invoke(new CacheSerializableRunnable(title + "verify removeAll cache server 1") {
       @Override
       public void run2() throws CacheException {
         Region region = getRootRegion().getSubregion(regionName);
@@ -690,8 +690,8 @@ public class PutAllCSDUnitTest extends ClientServerTestCase {
       }
     });
 
-    // verify bridge server 2, because its data are from distribution
-    server2.invoke(new CacheSerializableRunnable(title + "verify removeAll Bridge Server 2") {
+    // verify cache server 2, because its data are from distribution
+    server2.invoke(new CacheSerializableRunnable(title + "verify removeAll cache server 2") {
       @Override
       public void run2() throws CacheException {
         Region region = getRootRegion().getSubregion(regionName);
@@ -754,8 +754,8 @@ public class PutAllCSDUnitTest extends ClientServerTestCase {
       }
     });
 
-    // verify bridge server 1 for asyn keys
-    server1.invoke(new CacheSerializableRunnable(title + "verify Bridge Server 1 for async keys") {
+    // verify cache server 1 for async keys
+    server1.invoke(new CacheSerializableRunnable(title + "verify cache server 1 for async keys") {
       @Override
       public void run2() throws CacheException {
         Region region = getRootRegion().getSubregion(regionName);
@@ -774,8 +774,8 @@ public class PutAllCSDUnitTest extends ClientServerTestCase {
         }
       }
     });
-    // verify bridge server 2 for asyn keys
-    server2.invoke(new CacheSerializableRunnable(title + "verify Bridge Server 2 for async keys") {
+    // verify cache server 2 for async keys
+    server2.invoke(new CacheSerializableRunnable(title + "verify cache server 2 for async keys") {
       @Override
       public void run2() throws CacheException {
         Region region = getRootRegion().getSubregion(regionName);
@@ -842,8 +842,8 @@ public class PutAllCSDUnitTest extends ClientServerTestCase {
       }
     });
 
-    // verify bridge server 1, its data are from client
-    server1.invoke(new CacheSerializableRunnable(title + "verify async removeAll Bridge Server 1") {
+    // verify cache server 1, its data are from client
+    server1.invoke(new CacheSerializableRunnable(title + "verify async removeAll cache server 1") {
       @Override
       public void run2() throws CacheException {
         Region region = getRootRegion().getSubregion(regionName);
@@ -851,8 +851,8 @@ public class PutAllCSDUnitTest extends ClientServerTestCase {
       }
     });
 
-    // verify bridge server 2, because its data are from distribution
-    server2.invoke(new CacheSerializableRunnable(title + "verify async removeAll Bridge Server 2") {
+    // verify cache server 2, because its data are from distribution
+    server2.invoke(new CacheSerializableRunnable(title + "verify async removeAll cache server 2") {
       @Override
       public void run2() throws CacheException {
         Region region = getRootRegion().getSubregion(regionName);
@@ -884,8 +884,8 @@ public class PutAllCSDUnitTest extends ClientServerTestCase {
       }
     });
 
-    // verify bridge server 2 for p2p keys
-    server2.invoke(new CacheSerializableRunnable(title + "verify Bridge Server 2 for p2p keys") {
+    // verify cache server 2 for p2p keys
+    server2.invoke(new CacheSerializableRunnable(title + "verify cache server 2 for p2p keys") {
       @Override
       public void run2() throws CacheException {
         Region region = getRootRegion().getSubregion(regionName);
@@ -932,8 +932,8 @@ public class PutAllCSDUnitTest extends ClientServerTestCase {
         assertEquals(0, region.size());
       }
     });
-    // verify bridge server 2, because its data are from distribution
-    server2.invoke(new CacheSerializableRunnable(title + "verify p2p removeAll Bridge Server 2") {
+    // verify cache server 2, because its data are from distribution
+    server2.invoke(new CacheSerializableRunnable(title + "verify p2p removeAll cache server 2") {
       @Override
       public void run2() throws CacheException {
         Region region = getRootRegion().getSubregion(regionName);
@@ -1064,8 +1064,8 @@ public class PutAllCSDUnitTest extends ClientServerTestCase {
       }
     });
 
-    // verify bridge server 1, its data are from client
-    server1.invoke(new CacheSerializableRunnable(title + "verify Bridge Server 1") {
+    // verify cache server 1, its data are from client
+    server1.invoke(new CacheSerializableRunnable(title + "verify cache server 1") {
       @Override
       public void run2() throws CacheException {
         Region region = getRootRegion().getSubregion(regionName);
@@ -1079,7 +1079,7 @@ public class PutAllCSDUnitTest extends ClientServerTestCase {
       }
     });
 
-    server2.invoke(new CacheSerializableRunnable(title + "verify Bridge Server 2") {
+    server2.invoke(new CacheSerializableRunnable(title + "verify cache server 2") {
       @Override
       public void run2() throws CacheException {
         Region region = getRootRegion().getSubregion(regionName);
@@ -1101,8 +1101,8 @@ public class PutAllCSDUnitTest extends ClientServerTestCase {
       }
     });
 
-    // verify bridge server 1, its data are from client
-    server1.invoke(new CacheSerializableRunnable(title + "verify removeAll Bridge Server 1") {
+    // verify cache server 1, its data are from client
+    server1.invoke(new CacheSerializableRunnable(title + "verify removeAll cache server 1") {
       @Override
       public void run2() throws CacheException {
         Region region = getRootRegion().getSubregion(regionName);
@@ -1114,8 +1114,8 @@ public class PutAllCSDUnitTest extends ClientServerTestCase {
       }
     });
 
-    // verify bridge server 2, because its data are from distribution
-    server2.invoke(new CacheSerializableRunnable(title + "verify removeAll Bridge Server 2") {
+    // verify cache server 2, because its data are from distribution
+    server2.invoke(new CacheSerializableRunnable(title + "verify removeAll cache server 2") {
       @Override
       public void run2() throws CacheException {
         Region region = getRootRegion().getSubregion(regionName);
@@ -1151,8 +1151,8 @@ public class PutAllCSDUnitTest extends ClientServerTestCase {
       }
     });
 
-    // verify bridge server 1, its data are from client
-    server1.invoke(new CacheSerializableRunnable(title + "verify Bridge Server 1") {
+    // verify cache server 1, its data are from client
+    server1.invoke(new CacheSerializableRunnable(title + "verify cache server 1") {
       @Override
       public void run2() throws CacheException {
         Region region = getRootRegion().getSubregion(regionName);
@@ -1164,7 +1164,7 @@ public class PutAllCSDUnitTest extends ClientServerTestCase {
       }
     });
 
-    server2.invoke(new CacheSerializableRunnable(title + "verify Bridge Server 2") {
+    server2.invoke(new CacheSerializableRunnable(title + "verify cache server 2") {
       @Override
       public void run2() throws CacheException {
         Region region = getRootRegion().getSubregion(regionName);
@@ -1182,7 +1182,7 @@ public class PutAllCSDUnitTest extends ClientServerTestCase {
       }
     });
 
-    server1.invoke(new CacheSerializableRunnable(title + "verify removeAll Bridge Server 1") {
+    server1.invoke(new CacheSerializableRunnable(title + "verify removeAll cache server 1") {
       @Override
       public void run2() throws CacheException {
         Region region = getRootRegion().getSubregion(regionName);
@@ -1190,7 +1190,7 @@ public class PutAllCSDUnitTest extends ClientServerTestCase {
       }
     });
 
-    server2.invoke(new CacheSerializableRunnable(title + "verify removeAll Bridge Server 2") {
+    server2.invoke(new CacheSerializableRunnable(title + "verify removeAll cache server 2") {
       @Override
       public void run2() throws CacheException {
         Region region = getRootRegion().getSubregion(regionName);
@@ -1226,8 +1226,8 @@ public class PutAllCSDUnitTest extends ClientServerTestCase {
       }
     });
 
-    // verify bridge server 1, its data are from client
-    server1.invoke(new CacheSerializableRunnable(title + "verify Bridge Server 1") {
+    // verify cache server 1, its data are from client
+    server1.invoke(new CacheSerializableRunnable(title + "verify cache server 1") {
       @Override
       public void run2() throws CacheException {
         Region region = getRootRegion().getSubregion(regionName);
@@ -1311,7 +1311,7 @@ public class PutAllCSDUnitTest extends ClientServerTestCase {
       }
     });
 
-    client2.invoke(new CacheSerializableRunnable(title + "verify Bridge Server 2") {
+    client2.invoke(new CacheSerializableRunnable(title + "verify cache server 2") {
       @Override
       public void run2() throws CacheException {
         Wait.pause(5000);
@@ -1344,7 +1344,7 @@ public class PutAllCSDUnitTest extends ClientServerTestCase {
       }
     });
 
-    server1.invoke(new CacheSerializableRunnable(title + "verify Bridge Server 1") {
+    server1.invoke(new CacheSerializableRunnable(title + "verify cache server 1") {
       @Override
       public void run2() throws CacheException {
         Region region = getRootRegion().getSubregion(regionName);
@@ -1357,7 +1357,7 @@ public class PutAllCSDUnitTest extends ClientServerTestCase {
       }
     });
 
-    server2.invoke(new CacheSerializableRunnable(title + "verify Bridge Server 2") {
+    server2.invoke(new CacheSerializableRunnable(title + "verify cache server 2") {
       @Override
       public void run2() throws CacheException {
         Region region = getRootRegion().getSubregion(regionName);
@@ -1370,7 +1370,7 @@ public class PutAllCSDUnitTest extends ClientServerTestCase {
... 373 lines suppressed ...

