geode-commits mailing list archives

From jbarr...@apache.org
Subject [geode-native] branch develop updated: GEODE-2484: Replace ACE Map with synchronized unordered_map (#401)
Date Thu, 06 Dec 2018 19:11:15 GMT
This is an automated email from the ASF dual-hosted git repository.

jbarrett pushed a commit to branch develop
in repository https://gitbox.apache.org/repos/asf/geode-native.git


The following commit(s) were added to refs/heads/develop by this push:
     new 92e59fe  GEODE-2484: Replace ACE Map with synchronized unordered_map (#401)
92e59fe is described below

commit 92e59fe14c3b6b4eb363708116bbc96dffc04fc8
Author: Jacob Barrett <jbarrett@pivotal.io>
AuthorDate: Thu Dec 6 11:11:10 2018 -0800

    GEODE-2484: Replace ACE Map with synchronized unordered_map (#401)
    
    * Adds a synchronized_map wrapper template that synchronizes a map with a
      mutex (see the sketch below).
    * Replaces the unsynchronized ACE Map with std::unordered_map.
    * Replaces the ACE synchronized Set with a synchronized std::set.
    * Replaces ACE_Semaphore with std::mutex in ThinClientRegion.
    * Replaces ACE_Semaphore with std::condition_variable in
      ClientMetadataService.
    * Replaces ACE_Task with std::thread in ClientMetadataService.
    * Replaces ACE_RW_Thread_Mutex with boost::shared_mutex in
      ClientMetadataService.
    * Replaces use of the synchronized bounded queue.
     * The bounded aspect of the Queue type was not being used.
     * Converted to using std::deque directly.
     * Refactored the coalesce logic out into a standalone function.
    * Replaces ACE_Task with std::thread in EvictionController.
    * Replaces ACE_RW_Thread_Mutex with boost::shared_mutex in
      EvictionController.
    * Replaces the specialized IntQueue with std::queue in EvictionController.
     * Uses condition variables to avoid spurious wakeups.
    * Replaces raw pointer allocation with std::unique_ptr.
    * Replaces the specialized IntQueue with std::deque.
    * Replaces ACE_Task with std::thread.
     * Adds a new wrapper based on std::thread.
    * Cleans up includes for faster compilation.
    * Uses std::unique_ptr to manage destruction.
    * Adds thread names to macOS and Windows threads.
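
A minimal sketch of the synchronized_map idea, assuming only the standard
library; the committed header (cppcache/src/util/synchronized_map.hpp, added
by this change per the file list below) carries the full interface, so the
class here is illustrative rather than the committed code:

    #include <cstddef>
    #include <iostream>
    #include <mutex>
    #include <string>
    #include <unordered_map>
    #include <utility>

    // Wraps a map-like container with a mutex. Single operations lock
    // internally; make_lock() hands out a scoped lock so compound operations
    // (find-then-emplace, iteration) stay atomic, mirroring the
    // m_regions.make_lock() call sites in the CacheImpl.cpp diff below.
    template <typename Map, typename Mutex = std::recursive_mutex>
    class synchronized_map {
     public:
      using key_type = typename Map::key_type;
      using mapped_type = typename Map::mapped_type;

      std::unique_lock<Mutex> make_lock() const {
        return std::unique_lock<Mutex>(mutex_);
      }

      template <typename... Args>
      bool emplace(Args&&... args) {
        std::lock_guard<Mutex> lock(mutex_);
        return map_.emplace(std::forward<Args>(args)...).second;
      }

      std::size_t erase(const key_type& key) {
        std::lock_guard<Mutex> lock(mutex_);
        return map_.erase(key);
      }

      bool find(const key_type& key, mapped_type& value) const {
        std::lock_guard<Mutex> lock(mutex_);
        auto entry = map_.find(key);
        if (entry == map_.end()) return false;
        value = entry->second;
        return true;
      }

     private:
      Map map_;
      mutable Mutex mutex_;
    };

    int main() {
      synchronized_map<std::unordered_map<std::string, int>> regions;
      regions.emplace("Root1", 1);
      int id = 0;
      if (regions.find("Root1", id)) std::cout << "Root1 -> " << id << "\n";
    }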
---
 clicache/src/TypeRegistry.cpp                      |   2 +-
 cppcache/CMakeLists.txt                            |  10 +
 cppcache/integration-test-2/CMakeLists.txt         |   4 +-
 cppcache/integration-test/DeltaEx.hpp              |   3 +-
 .../testXmlCacheCreationWithOverFlow.cpp           |   6 +-
 .../testXmlCacheCreationWithPools.cpp              |   4 +-
 cppcache/shared/CMakeLists.txt                     |   6 +-
 cppcache/src/AdminRegion.cpp                       |   1 +
 cppcache/src/CacheFactory.cpp                      |   5 +-
 cppcache/src/CacheImpl.cpp                         | 135 ++++----
 cppcache/src/CacheImpl.hpp                         |  35 +-
 cppcache/src/ClientConnectionRequest.cpp           |   4 +-
 cppcache/src/ClientConnectionRequest.hpp           |   5 +-
 cppcache/src/ClientMetadata.cpp                    |  12 +-
 cppcache/src/ClientMetadata.hpp                    |   2 -
 cppcache/src/ClientMetadataService.cpp             | 258 ++++++--------
 cppcache/src/ClientMetadataService.hpp             |  69 ++--
 cppcache/src/ClientReplacementRequest.cpp          |   4 +-
 cppcache/src/ClientReplacementRequest.hpp          |   5 +-
 cppcache/src/CqEventImpl.cpp                       |   2 +
 cppcache/src/CqQueryImpl.cpp                       |   1 +
 cppcache/src/CqQueryImpl.hpp                       |   1 -
 cppcache/src/CqService.cpp                         |  77 ++---
 cppcache/src/CqService.hpp                         |  24 +-
 cppcache/src/DistributedSystemImpl.cpp             |  43 +++
 cppcache/src/DistributedSystemImpl.hpp             |  31 +-
 cppcache/src/EvictionController.cpp                | 128 +++----
 cppcache/src/EvictionController.hpp                |  80 ++---
 cppcache/src/EvictionThread.cpp                    |  51 ++-
 cppcache/src/EvictionThread.hpp                    |  39 +--
 cppcache/src/ExecutionImpl.cpp                     |   1 +
 cppcache/src/IntQueue.hpp                          | 101 ------
 .../src/InternalCacheTransactionManager2PCImpl.cpp |   1 +
 cppcache/src/LRUEntriesMap.cpp                     |   1 +
 cppcache/src/LocalRegion.cpp                       | 114 ++++---
 cppcache/src/LocalRegion.hpp                       |  27 +-
 cppcache/src/MapSegment.cpp                        | 143 ++++----
 cppcache/src/MapSegment.hpp                        |  48 +--
 cppcache/src/MapWithLock.hpp                       |  32 +-
 cppcache/src/NonCopyable.hpp                       |   8 +-
 cppcache/src/Pool.cpp                              |   5 +-
 cppcache/src/PoolFactory.cpp                       |   1 +
 cppcache/src/PoolStatistics.hpp                    |   1 +
 cppcache/src/RemoteQuery.cpp                       |   1 +
 cppcache/src/RemoteQuery.hpp                       |   7 +-
 cppcache/src/SerializationRegistry.cpp             | 117 +++----
 cppcache/src/SerializationRegistry.hpp             |  96 ++----
 cppcache/src/Set.hpp                               | 132 --------
 cppcache/src/Task.hpp                              |  96 +++---
 cppcache/src/TcrConnection.cpp                     |   9 +-
 cppcache/src/TcrConnection.hpp                     |   5 +-
 cppcache/src/TcrConnectionManager.cpp              | 135 ++++----
 cppcache/src/TcrConnectionManager.hpp              |  24 +-
 cppcache/src/TcrEndpoint.cpp                       |  40 +--
 cppcache/src/TcrEndpoint.hpp                       |  12 +-
 cppcache/src/TcrHADistributionManager.cpp          |   2 +
 cppcache/src/TcrHADistributionManager.hpp          |  14 +-
 cppcache/src/TcrMessage.cpp                        |   1 +
 cppcache/src/TcrMessage.hpp                        |   1 +
 cppcache/src/TcrPoolEndPoint.cpp                   |   6 +-
 cppcache/src/ThinClientBaseDM.cpp                  |  25 +-
 cppcache/src/ThinClientBaseDM.hpp                  |  29 +-
 .../src/ThinClientCacheDistributionManager.cpp     |  20 +-
 cppcache/src/ThinClientDistributionManager.cpp     |   2 +
 cppcache/src/ThinClientLocatorHelper.cpp           |   1 +
 cppcache/src/ThinClientLocatorHelper.hpp           |   4 +-
 cppcache/src/ThinClientPoolDM.cpp                  | 285 +++++++++++-----
 cppcache/src/ThinClientPoolDM.hpp                  | 199 +++--------
 cppcache/src/ThinClientPoolHADM.cpp                |  49 ++-
 cppcache/src/ThinClientPoolHADM.hpp                |  86 ++---
 cppcache/src/ThinClientPoolStickyDM.cpp            |   3 +-
 cppcache/src/ThinClientPoolStickyDM.hpp            |  29 +-
 cppcache/src/ThinClientRedundancyManager.cpp       |  41 +--
 cppcache/src/ThinClientRedundancyManager.hpp       |  17 +-
 cppcache/src/ThinClientRegion.cpp                  |  44 ++-
 cppcache/src/ThinClientRegion.hpp                  |   5 +-
 cppcache/src/ThinClientStickyManager.hpp           |   1 +
 cppcache/src/ThreadPool.cpp                        | 146 +++-----
 cppcache/src/ThreadPool.hpp                        |  76 ++---
 cppcache/src/TssConnectionWrapper.hpp              |  19 +-
 cppcache/src/config.h.in                           |   7 +
 cppcache/src/statistics/GeodeStatisticsFactory.cpp |  40 +--
 cppcache/src/statistics/GeodeStatisticsFactory.hpp |   9 +-
 cppcache/src/statistics/HostStatSampler.cpp        |  13 +-
 cppcache/src/statistics/HostStatSampler.hpp        |  16 +-
 cppcache/src/statistics/PoolStatsSampler.cpp       |  26 +-
 cppcache/src/statistics/PoolStatsSampler.hpp       |  20 +-
 cppcache/src/statistics/StatArchiveWriter.cpp      |   1 +
 cppcache/src/statistics/StatArchiveWriter.hpp      |   3 +-
 cppcache/src/statistics/StatisticsManager.cpp      |  13 +-
 cppcache/src/statistics/StatisticsManager.hpp      |   4 +-
 cppcache/src/{NonCopyable.hpp => util/queue.hpp}   |  49 +--
 cppcache/src/util/string.hpp                       |  12 +-
 cppcache/src/util/synchronized_map.hpp             | 166 +++++++++
 cppcache/src/util/synchronized_set.hpp             | 175 ++++++++++
 cppcache/test/CMakeLists.txt                       |  14 +-
 .../{CacheableDate.cpp => CacheableDateTest.cpp}   |   0
 cppcache/test/LocalRegionTest.cpp                  |  79 +++++
 ...{TcrMessage_unittest.cpp => TcrMessageTest.cpp} |   0
 .../functionalTests.cpp => ThreadPoolTest.cpp}     |  43 ++-
 cppcache/test/util/JavaModifiedUtf8Tests.cpp       |   6 +-
 cppcache/test/util/TestableRecursiveMutex.hpp      |  63 ++++
 cppcache/test/util/chrono/durationTest.cpp         |   8 +-
 cppcache/test/util/functionalTests.cpp             |   2 +-
 .../util/{functionalTests.cpp => queueTest.cpp}    |  23 +-
 cppcache/test/util/synchronized_mapTest.cpp        | 373 +++++++++++++++++++++
 cppcache/test/util/synchronized_setTest.cpp        | 373 +++++++++++++++++++++
 107 files changed, 2814 insertions(+), 2033 deletions(-)

diff --git a/clicache/src/TypeRegistry.cpp b/clicache/src/TypeRegistry.cpp
index f23311a..9809c62 100644
--- a/clicache/src/TypeRegistry.cpp
+++ b/clicache/src/TypeRegistry.cpp
@@ -250,7 +250,7 @@ namespace Apache
           auto&& serializationRegistry = CacheRegionHelper::getCacheImpl(m_cache->GetNative().get())->getSerializationRegistry();
           auto nativeDelegateFunction = static_cast<std::shared_ptr<native::Serializable>(*)()>(
               System::Runtime::InteropServices::Marshal::GetFunctionPointerForDelegate(nativeDelegate).ToPointer());
-          serializationRegistry->addDataSerializableFixedIdType(fixedId, nativeDelegateFunction);
+          serializationRegistry->addDataSerializableFixedIdType(static_cast<internal::DSFid>(fixedId), nativeDelegateFunction);
         _GF_MG_EXCEPTION_CATCH_ALL2
       }
 
diff --git a/cppcache/CMakeLists.txt b/cppcache/CMakeLists.txt
index 0ad660b..1f97c41 100644
--- a/cppcache/CMakeLists.txt
+++ b/cppcache/CMakeLists.txt
@@ -16,6 +16,15 @@
 cmake_minimum_required(VERSION 3.10)
 project(cppcache LANGUAGES CXX)
 
+include(CheckIncludeFile)
+include(CheckFunctionExists)
+
+find_package(Threads)
+if (CMAKE_USE_PTHREADS_INIT)
+  check_include_file("pthread.h" HAVE_PTHREAD_H)
+  check_function_exists("pthread_setname_np" HAVE_pthread_setname_np)
+endif()
+
 set(COMMON_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/src)
 set(COMMON_INCLUDE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/include)
 
@@ -89,6 +98,7 @@ target_link_libraries(_apache-geode INTERFACE
   ACE
   Boost::boost
   Boost::filesystem
+  Boost::thread
   libxml2
 )
 
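The checks above surface as HAVE_pthread_setname_np through the generated
config header (cppcache/src/config.h.in gains matching lines, per the file
list). A standalone sketch of how such a guard is typically consumed when
naming threads; the helper name is hypothetical, and the platform branches
reflect the differing pthread_setname_np signatures:

    #include <string>
    #if defined(HAVE_PTHREAD_H)
    #include <pthread.h>
    #endif

    // Hypothetical helper: names the calling thread where the platform
    // supports it, and quietly does nothing elsewhere.
    void setCurrentThreadName(const std::string& name) {
    #if defined(HAVE_pthread_setname_np)
    #if defined(__APPLE__)
      // macOS can only name the calling thread.
      pthread_setname_np(name.c_str());
    #else
      // glibc form; names are limited to 15 characters plus the terminator.
      pthread_setname_np(pthread_self(), name.substr(0, 15).c_str());
    #endif
    #else
      (void)name;  // platform without pthread_setname_np
    #endif
    }
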
diff --git a/cppcache/integration-test-2/CMakeLists.txt b/cppcache/integration-test-2/CMakeLists.txt
index 05fa53a..ddd14ed 100644
--- a/cppcache/integration-test-2/CMakeLists.txt
+++ b/cppcache/integration-test-2/CMakeLists.txt
@@ -55,9 +55,9 @@ target_link_libraries(integration-test-2
     Boost::system
     Boost::log
     Boost::filesystem
- PRIVATE
+  PRIVATE
     _WarningsAsError
- internal
+    internal
 )
 
 if(WIN32)
diff --git a/cppcache/integration-test/DeltaEx.hpp b/cppcache/integration-test/DeltaEx.hpp
index 931456c..f1f4de2 100644
--- a/cppcache/integration-test/DeltaEx.hpp
+++ b/cppcache/integration-test/DeltaEx.hpp
@@ -27,10 +27,11 @@
 #include <geode/PdxWriter.hpp>
 #include <geode/PdxReader.hpp>
 #include <geode/Delta.hpp>
+#include <geode/DataSerializable.hpp>
 
 #include "CacheHelper.hpp"
 
-namespace { // NOLINT(google-build-namespaces)
+namespace {  // NOLINT(google-build-namespaces)
 
 using apache::geode::client::DataInput;
 using apache::geode::client::DataOutput;
diff --git a/cppcache/integration-test/testXmlCacheCreationWithOverFlow.cpp b/cppcache/integration-test/testXmlCacheCreationWithOverFlow.cpp
index d882422..ea9a1b9 100644
--- a/cppcache/integration-test/testXmlCacheCreationWithOverFlow.cpp
+++ b/cppcache/integration-test/testXmlCacheCreationWithOverFlow.cpp
@@ -95,13 +95,13 @@ int testXmlCacheCreationWithOverflow() {
     std::cout << "vc[" << i << "].m_reaPtr=" << vrp.at(i).get() << std::endl;
     std::cout << "vc[" << i << "]=" << vrp.at(i)->getName() << std::endl;
   }
-  auto regPtr1 = vrp.at(0);
+  auto regPtr1 = cptr->getRegion("Root1");
 
   std::cout
       << "Test if the number of sub regions with the root region Root1 are "
          "correct"
       << std::endl;
-  std::vector<std::shared_ptr<Region>> vr = regPtr1->subregions(true);
+  auto vr = regPtr1->subregions(true);
   std::cout << "  vr.size=" << vr.size() << std::endl;
   if (vr.size() != totalSubRegionsRoot1) {
     std::cout << "Number of Subregions does not match" << std::endl;
@@ -116,7 +116,7 @@ int testXmlCacheCreationWithOverflow() {
   }
 
   std::cout << "Test if the nesting of regions is correct" << std::endl;
-  auto regPtr2 = vrp.at(1);
+  auto regPtr2 = cptr->getRegion("Root2");
   auto &&vsr = regPtr2->subregions(true);
   for (auto &&regPtr : vsr) {
     auto &&childName = regPtr->getName();
diff --git a/cppcache/integration-test/testXmlCacheCreationWithPools.cpp b/cppcache/integration-test/testXmlCacheCreationWithPools.cpp
index a207e69..81329a5 100644
--- a/cppcache/integration-test/testXmlCacheCreationWithPools.cpp
+++ b/cppcache/integration-test/testXmlCacheCreationWithPools.cpp
@@ -311,7 +311,7 @@ int testXmlCacheCreationWithPools() {
     std::cout << "vc[" << i << "].m_regionPtr=" << vrp.at(i).get() << std::endl;
     std::cout << "vc[" << i << "]=" << vrp.at(i)->getName() << std::endl;
   }
-  auto regPtr1 = vrp.at(0);
+  auto regPtr1 = cptr->getRegion("Root1");
 
   auto &&vr = regPtr1->subregions(true);
   std::cout << "Test if the number of sub regions with the root region Root1 "
@@ -335,7 +335,7 @@ int testXmlCacheCreationWithPools() {
   // pools. Check if this assumption is valid and if so then break up this test.
   auto subRegPtr = vr.at(0);
 
-  auto regPtr2 = vrp.at(1);
+  auto regPtr2 = cptr->getRegion("Root2");
 
   std::cout << "Test if the number of sub regions with the root region Root2 "
                "are correct"
diff --git a/cppcache/shared/CMakeLists.txt b/cppcache/shared/CMakeLists.txt
index 09c11ec..e8803a0 100644
--- a/cppcache/shared/CMakeLists.txt
+++ b/cppcache/shared/CMakeLists.txt
@@ -13,7 +13,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-cmake_minimum_required(VERSION 3.4)
 project(apache-geode LANGUAGES CXX)
 
 add_library(apache-geode SHARED ${SOURCES_ALL} ${RESOURCES})
@@ -24,6 +23,11 @@ if (MSVC)
 
 #define APACHE_GEODE_EXTERN_TEMPLATE_EXPORT
 ")
+
+  target_compile_options(apache-geode
+    PRIVATE
+      /bigobj # C1128 - large number of templates causes too many sections.
+  )
 else()
   set(EXPORT_HEADER_CUSTOM_CONTENT "
 #define APACHE_GEODE_EXPLICIT_TEMPLATE_EXPORT
diff --git a/cppcache/src/AdminRegion.cpp b/cppcache/src/AdminRegion.cpp
index 5a93743..e08c734 100644
--- a/cppcache/src/AdminRegion.cpp
+++ b/cppcache/src/AdminRegion.cpp
@@ -20,6 +20,7 @@
 #include <geode/SystemProperties.hpp>
 
 #include "CacheImpl.hpp"
+#include "TcrConnectionManager.hpp"
 #include "ThinClientPoolDM.hpp"
 #include "ThinClientRegion.hpp"
 #include "statistics/StatisticsManager.hpp"
diff --git a/cppcache/src/CacheFactory.cpp b/cppcache/src/CacheFactory.cpp
index b6ebb8d..72e63dd 100644
--- a/cppcache/src/CacheFactory.cpp
+++ b/cppcache/src/CacheFactory.cpp
@@ -108,9 +108,8 @@ Cache CacheFactory::create() const {
       std::bind(VersionTag::createDeserializable, memberListForVersionStamp));
 
   serializationRegistry->addDataSerializableFixedIdType(
-      static_cast<int64_t>(DSFid::DiskVersionTag),
-      std::bind(DiskVersionTag::createDeserializable,
-                memberListForVersionStamp));
+      DSFid::DiskVersionTag, std::bind(DiskVersionTag::createDeserializable,
+                                       memberListForVersionStamp));
 
   serializationRegistry->setPdxTypeHandler(new PdxTypeHandler());
   serializationRegistry->setDataSerializableHandler(
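
This hunk and the TypeRegistry.cpp hunk above both move fixed-id registration
from raw integers to the internal::DSFid scoped enum. A stripped-down sketch
of what that buys, with placeholder enum values (the real ids live in
geode/internal/DataSerializableFixedId.hpp and are not reproduced here):

    #include <cstdint>
    #include <iostream>

    namespace internal {
    // Placeholder values, for illustration only.
    enum class DSFid : int32_t { DiskVersionTag = 1, ClientConnectionRequest = 2 };
    }  // namespace internal

    // Accepting the scoped enum rejects untyped integers at compile time,
    // which is why call sites now pass DSFid values (or cast explicitly).
    void addDataSerializableFixedIdType(internal::DSFid id) {
      std::cout << "registered fixed id " << static_cast<int32_t>(id) << "\n";
    }

    int main() {
      addDataSerializableFixedIdType(internal::DSFid::DiskVersionTag);
      // addDataSerializableFixedIdType(1);  // would not compile
    }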
diff --git a/cppcache/src/CacheImpl.cpp b/cppcache/src/CacheImpl.cpp
index 0c4782f..87e0c61 100644
--- a/cppcache/src/CacheImpl.cpp
+++ b/cppcache/src/CacheImpl.cpp
@@ -29,12 +29,14 @@
 #include "AutoDelete.hpp"
 #include "CacheXmlParser.hpp"
 #include "ClientProxyMembershipID.hpp"
+#include "EvictionController.hpp"
 #include "ExpiryTaskManager.hpp"
 #include "InternalCacheTransactionManager2PCImpl.hpp"
 #include "LocalRegion.hpp"
 #include "PdxTypeRegistry.hpp"
 #include "RegionExpiryHandler.hpp"
 #include "SerializationRegistry.hpp"
+#include "TcrConnectionManager.hpp"
 #include "TcrMessage.hpp"
 #include "ThinClientHARegion.hpp"
 #include "ThinClientPoolDM.hpp"
@@ -65,7 +67,6 @@ CacheImpl::CacheImpl(Cache* c, const std::shared_ptr<Properties>& dsProps,
       m_clientProxyMembershipIDFactory(m_distributedSystem.getName()),
       m_cache(c),
       m_attributes(nullptr),
-      m_evictionControllerPtr(nullptr),
       m_tcrConnectionManager(nullptr),
       m_remoteQueryServicePtr(nullptr),
       m_destroyPending(false),
@@ -75,20 +76,18 @@ CacheImpl::CacheImpl(Cache* c, const std::shared_ptr<Properties>& dsProps,
           *(std::make_shared<MemberListForVersionStamp>())),
       m_serializationRegistry(std::make_shared<SerializationRegistry>()),
       m_pdxTypeRegistry(nullptr),
-      m_threadPool(new ThreadPool(
-          m_distributedSystem.getSystemProperties().threadPoolSize())),
+      m_threadPool(m_distributedSystem.getSystemProperties().threadPoolSize()),
       m_authInitialize(authInitialize) {
   using apache::geode::statistics::StatisticsManager;
 
   m_cacheTXManager = std::shared_ptr<InternalCacheTransactionManager2PC>(
       new InternalCacheTransactionManager2PCImpl(this));
 
-  m_regions = new MapOfRegionWithLock();
   auto& prop = m_distributedSystem.getSystemProperties();
   if (prop.heapLRULimitEnabled()) {
-    m_evictionControllerPtr =
-        new EvictionController(prop.heapLRULimit(), prop.heapLRUDelta(), this);
-    m_evictionControllerPtr->start();
+    m_evictionController = std::unique_ptr<EvictionController>(
+        new EvictionController(prop.heapLRULimit(), prop.heapLRUDelta(), this));
+    m_evictionController->start();
     LOGINFO("Heap LRU eviction controller thread started");
   }
 
@@ -184,14 +183,11 @@ CacheImpl::RegionKind CacheImpl::getRegionKind(
   return regionKind;
 }
 
-int CacheImpl::removeRegion(const char* name) {
+void CacheImpl::removeRegion(const std::string& name) {
   TryReadGuard guardCacheDestroy(m_destroyCacheMutex, m_destroyPending);
-  if (m_destroyPending) {
-    return 0;
+  if (!m_destroyPending) {
+    m_regions.erase(name);
   }
-
-  MapOfRegionGuard guard(m_regions->mutex());
-  return m_regions->unbind(name);
 }
 
 std::shared_ptr<QueryService> CacheImpl::getQueryService(bool noInit) {
@@ -232,10 +228,6 @@ CacheImpl::~CacheImpl() {
   if (!m_closed) {
     close();
   }
-
-  if (m_regions != nullptr) {
-    delete m_regions;
-  }
 }
 
 const std::string& CacheImpl::getName() const {
@@ -308,24 +300,22 @@ void CacheImpl::close(bool keepalive) {
     m_tcrConnectionManager->close();
   }
 
-  MapOfRegionWithLock regions;
+  std::unordered_map<std::string, std::shared_ptr<Region>> regions;
   getSubRegions(regions);
 
-  for (MapOfRegionWithLock::iterator q = regions.begin(); q != regions.end();
-       ++q) {
+  for (const auto& kv : regions) {
     // TODO: remove dynamic_cast here by having RegionInternal in the regions
     // map
-    auto rImpl = std::dynamic_pointer_cast<RegionInternal>((*q).int_id_);
-    if (rImpl != nullptr) {
+    auto rImpl = std::dynamic_pointer_cast<RegionInternal>(kv.second);
+    if (rImpl) {
       rImpl->destroyRegionNoThrow(
           nullptr, false,
           CacheEventFlags::LOCAL | CacheEventFlags::CACHE_CLOSE);
     }
   }
 
-  if (m_evictionControllerPtr != nullptr) {
-    m_evictionControllerPtr->stop();
-    _GEODE_SAFE_DELETE(m_evictionControllerPtr);
+  if (m_evictionController) {
+    m_evictionController->stop();
   }
 
   // Close CachePef Stats
@@ -343,7 +333,7 @@ void CacheImpl::close(bool keepalive) {
     _GEODE_SAFE_DELETE(m_cacheStats);
   }
 
-  m_regions->unbind_all();
+  m_regions.clear();
   LOGDEBUG("CacheImpl::close( ): destroyed regions.");
 
   _GEODE_SAFE_DELETE(m_tcrConnectionManager);
@@ -406,12 +396,9 @@ void CacheImpl::createRegion(std::string name,
   validateRegionAttributes(name, regionAttributes);
   std::shared_ptr<RegionInternal> rpImpl = nullptr;
   {
-    // For multi threading and the operations between bind and find seems to be
-    // hard to be atomic since a regionImpl needs to be valid before it can be
-    // bound
-    MapOfRegionGuard guard1(m_regions->mutex());
-    std::shared_ptr<Region> tmp;
-    if (0 == m_regions->find(name, tmp)) {
+    auto&& lock = m_regions.make_lock();
+
+    if (m_regions.find(name) != m_regions.end()) {
       throw RegionExistsException("Cache::createRegion: \"" + name +
                                   "\" region exists in local cache");
     }
@@ -455,20 +442,18 @@ void CacheImpl::createRegion(std::string name,
     }
 
     rpImpl->acquireReadLock();
-    m_regions->bind(regionPtr->getName(), regionPtr);
-
-    // When region is created, added that region name in client meta data
-    // service to fetch its
-    // metadata for single hop.
-    auto& props = m_distributedSystem.getSystemProperties();
-    const auto& poolName = regionAttributes.getPoolName();
-    if (!poolName.empty()) {
-      auto pool = getPoolManager().find(poolName);
-      if (pool != nullptr && !pool->isDestroyed() &&
-          pool->getPRSingleHopEnabled()) {
-        ThinClientPoolDM* poolDM = dynamic_cast<ThinClientPoolDM*>(pool.get());
-        if ((poolDM != nullptr) &&
-            (poolDM->getClientMetaDataService() != nullptr)) {
+    m_regions.emplace(regionPtr->getName(), regionPtr);
+  }
+
+  // When a region is created, add its name to the client metadata
+  // service so it can fetch the region's metadata for single hop.
+  const auto& poolName = regionAttributes.getPoolName();
+  if (!poolName.empty()) {
+    const auto& pool = getPoolManager().find(poolName);
+    if (pool && !pool->isDestroyed() && pool->getPRSingleHopEnabled()) {
+      if (const auto& poolDM =
+              std::dynamic_pointer_cast<ThinClientPoolDM>(pool)) {
+        if (auto clientMetaDataService = poolDM->getClientMetaDataService()) {
           LOGFINE("enqueued region " + name +
                   " for initial metadata refresh for singlehop ");
           poolDM->getClientMetaDataService()->enqueueForMetadataRefresh(
@@ -483,6 +468,15 @@ void CacheImpl::createRegion(std::string name,
   rpImpl->releaseReadLock();
 }
 
+std::shared_ptr<Region> CacheImpl::findRegion(const std::string& name) {
+  auto&& lock = m_regions.make_lock<std::lock_guard>();
+  const auto& find = m_regions.find(name);
+  if (find != m_regions.end()) {
+    return find->second;
+  }
+  return nullptr;
+}
+
 std::shared_ptr<Region> CacheImpl::getRegion(const std::string& path) {
   LOGDEBUG("Cache::getRegion " + path);
 
@@ -494,34 +488,30 @@ std::shared_ptr<Region> CacheImpl::getRegion(const std::string& path) {
     return nullptr;
   }
 
-  MapOfRegionGuard guard(m_regions->mutex());
   static const std::string slash("/");
   if (path == slash || path.length() < 1) {
     LOGERROR("Cache::getRegion: path [" + path + "] is not valid.");
     throw IllegalArgumentException("Cache::getRegion: path is empty or a /");
   }
+
   auto fullname = path;
   if (fullname.substr(0, 1) == slash) {
     fullname = path.substr(1);
   }
 
   // find second separator
-  auto idx = static_cast<uint32_t>(fullname.find('/'));
+  auto idx = fullname.find('/');
   auto stepname = fullname.substr(0, idx);
 
-  std::shared_ptr<Region> region = nullptr;
-
-  if (0 == m_regions->find(stepname, region)) {
+  auto region = findRegion(stepname);
+  if (region) {
     if (stepname != fullname) {
       auto remainder = fullname.substr(stepname.length() + 1);
-
-      if (region != nullptr) {
-        region = region->getSubregion(remainder.c_str());
-      }
+      region = region->getSubregion(remainder);
     }
   }
 
-  if (region != nullptr && isPoolInMultiuserMode(region)) {
+  if (region && isPoolInMultiuserMode(region)) {
     LOGWARN("Pool " + region->getAttributes().getPoolName() +
             " attached with region " + region->getFullPath() +
             " is in multiuser authentication mode. Operations may fail as "
@@ -602,15 +592,14 @@ std::vector<std::shared_ptr<Region>> CacheImpl::rootRegions() {
 
   std::vector<std::shared_ptr<Region>> regions;
 
-  MapOfRegionGuard guard(m_regions->mutex());
+  auto&& lock = m_regions.make_lock();
 
-  if (m_regions->current_size() != 0) {
-    regions.reserve(static_cast<int32_t>(m_regions->current_size()));
+  if (!m_regions.empty()) {
+    regions.reserve(m_regions.size());
 
-    for (MapOfRegionWithLock::iterator q = m_regions->begin();
-         q != m_regions->end(); ++q) {
-      if (!(*q).int_id_->isDestroyed()) {
-        regions.push_back((*q).int_id_);
+    for (const auto& kv : m_regions) {
+      if (!kv.second->isDestroyed()) {
+        regions.push_back(kv.second);
       }
     }
   }
@@ -629,7 +618,7 @@ void CacheImpl::initializeDeclarativeCache(const std::string& cacheXml) {
 }
 
 EvictionController* CacheImpl::getEvictionController() {
-  return m_evictionControllerPtr;
+  return m_evictionController.get();
 }
 
 void CacheImpl::readyForEvents() {
@@ -704,7 +693,7 @@ bool CacheImpl::getEndpointStatus(const std::string& endpoint) {
   auto& mutex = firstPool->m_endpointsLock;
   std::lock_guard<decltype(mutex)> guard(mutex);
   for (const auto& itr : firstPool->m_endpoints) {
-    const auto& ep = itr.int_id_;
+    auto ep = itr.second;
     if (ep->name().find(fullName) != std::string::npos) {
       return ep->getServerQueueStatusTEST();
     }
@@ -718,12 +707,12 @@ void CacheImpl::processMarker() {
     return;
   }
 
-  MapOfRegionGuard guard(m_regions->mutex());
+  auto&& lock = m_regions.make_lock();
 
-  for (const auto& q : *m_regions) {
-    if (!q.int_id_->isDestroyed()) {
+  for (const auto& kv : m_regions) {
+    if (!kv.second->isDestroyed()) {
       if (const auto tcrHARegion =
-              std::dynamic_pointer_cast<ThinClientHARegion>(q.int_id_)) {
+              std::dynamic_pointer_cast<ThinClientHARegion>(kv.second)) {
         auto regionMsg = new TcrMessageClientMarker(
             new DataOutput(createDataOutput()), true);
         tcrHARegion->receiveNotification(regionMsg);
@@ -797,7 +786,8 @@ std::shared_ptr<SerializationRegistry> CacheImpl::getSerializationRegistry()
   return m_serializationRegistry;
 }
 
-ThreadPool* CacheImpl::getThreadPool() { return m_threadPool; }
+ThreadPool& CacheImpl::getThreadPool() { return m_threadPool; }
+
 std::shared_ptr<CacheTransactionManager>
 CacheImpl::getCacheTransactionManager() {
   this->throwIfClosed();
@@ -884,6 +874,11 @@ AuthenticatedView CacheImpl::createAuthenticatedView(
 }
 
 void CacheImpl::setCache(Cache* cache) { m_cache = cache; }
+
+void CacheImpl::setClientCrashTEST() {
+  m_tcrConnectionManager->setClientCrashTEST();
+}
+
 }  // namespace client
 }  // namespace geode
 }  // namespace apache
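
One pattern worth noting in the createRegion hunk above: the regions lock now
covers only the check-and-insert, and the single-hop metadata enqueue happens
after the lock is released. A reduced sketch of that shape, with hypothetical
names standing in for the region and enqueue machinery:

    #include <mutex>
    #include <stdexcept>
    #include <string>
    #include <unordered_map>

    using RegionMap = std::unordered_map<std::string, int>;  // int: stand-in region

    void createRegion(RegionMap& regions, std::recursive_mutex& regionsMutex,
                      const std::string& name) {
      {
        std::lock_guard<std::recursive_mutex> lock(regionsMutex);
        if (regions.find(name) != regions.end()) {
          throw std::runtime_error("region exists in local cache: " + name);
        }
        regions.emplace(name, 0);
      }  // lock released before any slow work
      // enqueueForMetadataRefresh(name);  // hypothetical; runs unlocked, as above
    }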
diff --git a/cppcache/src/CacheImpl.hpp b/cppcache/src/CacheImpl.hpp
index f60104c..3ab93ad 100644
--- a/cppcache/src/CacheImpl.hpp
+++ b/cppcache/src/CacheImpl.hpp
@@ -36,13 +36,12 @@
 #include "CachePerfStats.hpp"
 #include "ClientProxyMembershipIDFactory.hpp"
 #include "DistributedSystem.hpp"
-#include "EvictionController.hpp"
-#include "MapWithLock.hpp"
 #include "MemberListForVersionStamp.hpp"
 #include "NonCopyable.hpp"
 #include "PdxTypeRegistry.hpp"
 #include "RemoteQueryService.hpp"
-#include "TcrConnectionManager.hpp"
+#include "ThreadPool.hpp"
+#include "util/synchronized_map.hpp"
 
 #define DEFAULT_LRU_MAXIMUM_ENTRIES 100000
 /** @todo period '.' consistency */
@@ -64,6 +63,8 @@ class Pool;
 class RegionAttributes;
 class SerializationRegistry;
 class ThreadPool;
+class EvictionController;
+class TcrConnectionManager;
 
 /**
  * @class Cache Cache.hpp
@@ -94,7 +95,7 @@ class APACHE_GEODE_EXPORT CacheImpl : private NonCopyable,
   // drop
   void netDown();
   void revive();
-  void setClientCrashTEST() { m_tcrConnectionManager->setClientCrashTEST(); }
+  void setClientCrashTEST();
 
   // For PrSingleHop C++unit testing.
   void setNetworkHopFlag(bool networkhopflag) {
@@ -236,7 +237,7 @@ class APACHE_GEODE_EXPORT CacheImpl : private NonCopyable,
     return *m_tcrConnectionManager;
   }
 
-  int removeRegion(const char* name);
+  void removeRegion(const std::string& name);
 
   std::shared_ptr<QueryService> getQueryService(bool noInit = false);
 
@@ -308,7 +309,7 @@ class APACHE_GEODE_EXPORT CacheImpl : private NonCopyable,
     return m_distributedSystem.getSystemProperties();
   }
 
-  ThreadPool* getThreadPool();
+  ThreadPool& getThreadPool();
 
   inline const std::shared_ptr<AuthInitialize>& getAuthInitialize() {
     return m_authInitialize;
@@ -363,15 +364,15 @@ class APACHE_GEODE_EXPORT CacheImpl : private NonCopyable,
   void validateRegionAttributes(const std::string& name,
                                 const RegionAttributes attrs) const;
 
-  inline void getSubRegions(MapOfRegionWithLock& srm) {
-    MapOfRegionGuard guard(m_regions->mutex());
-    if (m_regions->current_size() == 0) return;
-    for (MapOfRegionWithLock::iterator p = m_regions->begin();
-         p != m_regions->end(); ++p) {
-      srm.bind((*p).ext_id_, (*p).int_id_);
-    }
+  inline void getSubRegions(
+      std::unordered_map<std::string, std::shared_ptr<Region>>& srm) {
+    auto&& lock = m_regions.make_lock<std::lock_guard>();
+    if (m_regions.empty()) return;
+    srm.insert(m_regions.begin(), m_regions.end());
   }
 
+  std::shared_ptr<Region> findRegion(const std::string& name);
+
   void setCache(Cache* cache);
 
   bool m_closed;
@@ -379,10 +380,12 @@ class APACHE_GEODE_EXPORT CacheImpl : private NonCopyable,
 
   DistributedSystem m_distributedSystem;
   ClientProxyMembershipIDFactory m_clientProxyMembershipIDFactory;
-  MapOfRegionWithLock* m_regions;
+  synchronized_map<std::unordered_map<std::string, std::shared_ptr<Region>>,
+                   std::recursive_mutex>
+      m_regions;
   Cache* m_cache;
   std::shared_ptr<CacheAttributes> m_attributes;
-  EvictionController* m_evictionControllerPtr;
+  std::unique_ptr<EvictionController> m_evictionController;
   TcrConnectionManager* m_tcrConnectionManager;
   std::shared_ptr<RemoteQueryService> m_remoteQueryServicePtr;
   ACE_RW_Thread_Mutex m_destroyCacheMutex;
@@ -395,7 +398,7 @@ class APACHE_GEODE_EXPORT CacheImpl : private NonCopyable,
   MemberListForVersionStamp& m_memberListForVersionStamp;
   std::shared_ptr<SerializationRegistry> m_serializationRegistry;
   std::shared_ptr<PdxTypeRegistry> m_pdxTypeRegistry;
-  ThreadPool* m_threadPool;
+  ThreadPool m_threadPool;
   const std::shared_ptr<AuthInitialize> m_authInitialize;
   std::unique_ptr<TypeRegistry> m_typeRegistry;
 
diff --git a/cppcache/src/ClientConnectionRequest.cpp b/cppcache/src/ClientConnectionRequest.cpp
index 8b9dd78..6c4c0be 100644
--- a/cppcache/src/ClientConnectionRequest.cpp
+++ b/cppcache/src/ClientConnectionRequest.cpp
@@ -29,8 +29,8 @@ void ClientConnectionRequest::toData(DataOutput& output) const {
   writeSetOfServerLocation(output);
 }
 
-DSFid ClientConnectionRequest::getDSFID() const {
-  return DSFid::ClientConnectionRequest;
+internal::DSFid ClientConnectionRequest::getDSFID() const {
+  return internal::DSFid::ClientConnectionRequest;
 }
 
 void ClientConnectionRequest::writeSetOfServerLocation(
diff --git a/cppcache/src/ClientConnectionRequest.hpp b/cppcache/src/ClientConnectionRequest.hpp
index 4327e1a..f1bf1f2 100644
--- a/cppcache/src/ClientConnectionRequest.hpp
+++ b/cppcache/src/ClientConnectionRequest.hpp
@@ -23,9 +23,10 @@
 #include <set>
 #include <string>
 
+#include <geode/internal/DataSerializableFixedId.hpp>
+
 #include "ServerLocation.hpp"
 #include "ServerLocationRequest.hpp"
-#include "TcrEndpoint.hpp"
 
 namespace apache {
 namespace geode {
@@ -40,7 +41,7 @@ class ClientConnectionRequest : public ServerLocationRequest {
         m_servergroup(servergroup),
         m_excludeServergroup_serverLocation(excludeServergroup) {}
   void toData(DataOutput& output) const override;
-  DSFid getDSFID() const override;
+  internal::DSFid getDSFID() const override;
   std::string getServerGroup() const { return m_servergroup; }
   const std::set<ServerLocation>& getExcludedServerGroup() const {
     return m_excludeServergroup_serverLocation;
diff --git a/cppcache/src/ClientMetadata.cpp b/cppcache/src/ClientMetadata.cpp
index e9acd57..2754190 100644
--- a/cppcache/src/ClientMetadata.cpp
+++ b/cppcache/src/ClientMetadata.cpp
@@ -14,6 +14,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 #include "ClientMetadata.hpp"
 
 #include <climits>
@@ -289,17 +290,6 @@ void ClientMetadata::updateBucketServerLocations(
   }
 }
 
-void ClientMetadata::removeBucketServerLocation(BucketServerLocation) {}
-
-void ClientMetadata::populateDummyServers(int bucketId,
-                                          BucketServerLocationsType locations) {
-  // WriteGuard guard( m_readWriteLock );
-
-  checkBucketId(bucketId);
-
-  m_bucketServerLocationsList[bucketId] = locations;
-}
-
 int ClientMetadata::assignFixedBucketId(
     const char* partitionName, std::shared_ptr<CacheableKey> resolvekey) {
   LOGDEBUG(
diff --git a/cppcache/src/ClientMetadata.hpp b/cppcache/src/ClientMetadata.hpp
index 9e3865f..24a9a7c 100644
--- a/cppcache/src/ClientMetadata.hpp
+++ b/cppcache/src/ClientMetadata.hpp
@@ -84,11 +84,9 @@ class APACHE_GEODE_EXPORT ClientMetadata : public NonAssignable {
   // ServerLocation getPrimaryServerLocation(int bucketId);
   void updateBucketServerLocations(
       int bucketId, BucketServerLocationsType bucketServerLocations);
-  void removeBucketServerLocation(BucketServerLocation serverLocation);
   int getTotalNumBuckets();
   // std::shared_ptr<PartitionResolver> getPartitionResolver();
   const std::string& getColocatedWith();
-  void populateDummyServers(int bucketId, BucketServerLocationsType serverlist);
   int assignFixedBucketId(const char* partitionName,
                           std::shared_ptr<CacheableKey> resolvekey);
   std::shared_ptr<CacheableHashSet>& getFixedPartitionNames() {
diff --git a/cppcache/src/ClientMetadataService.cpp b/cppcache/src/ClientMetadataService.cpp
index ce57ece..d6f975c 100644
--- a/cppcache/src/ClientMetadataService.cpp
+++ b/cppcache/src/ClientMetadataService.cpp
@@ -19,13 +19,16 @@
 
 #include <climits>
 #include <cstdlib>
-#include <iterator>
-#include <unordered_set>
+
+#include <boost/thread/lock_types.hpp>
 
 #include <geode/FixedPartitionResolver.hpp>
 
+#include "ClientMetadata.hpp"
+#include "TcrConnectionManager.hpp"
 #include "TcrMessage.hpp"
 #include "ThinClientPoolDM.hpp"
+#include "util/queue.hpp"
 
 namespace apache {
 namespace geode {
@@ -35,80 +38,58 @@ const BucketStatus::clock::time_point BucketStatus::m_noTimeout{};
 
 const char* ClientMetadataService::NC_CMDSvcThread = "NC CMDSvcThread";
 
-ClientMetadataService::~ClientMetadataService() {
-  if (m_bucketWaitTimeout > std::chrono::milliseconds::zero()) {
-    try {
-      std::map<std::string, PRbuckets*>::iterator bi;
-      for (bi = m_bucketStatus.begin(); bi != m_bucketStatus.end(); ++bi) {
-        delete bi->second;
-      }
+ClientMetadataService::ClientMetadataService(ThinClientPoolDM* pool)
+    : m_run(false),
+      m_pool(pool),
+      m_cache(m_pool->getConnectionManager().getCacheImpl()),
+      m_regionQueue(false),
+      m_bucketWaitTimeout(m_cache->getDistributedSystem()
+                              .getSystemProperties()
+                              .bucketWaitTimeout())
 
-    } catch (...) {
-      LOGINFO("Exception in ClientMetadataService destructor");
-    }
-  }
+{}
+
+void ClientMetadataService::start() {
+  m_run = true;
+  m_thread = std::thread(&ClientMetadataService::svc, this);
 }
 
-ClientMetadataService::ClientMetadataService(Pool* pool)
-    : m_run(false),
-      m_pool(pool),
-      m_regionQueue(false)
-
-{
-  auto tcrdm = dynamic_cast<ThinClientPoolDM*>(m_pool);
-  auto cacheImpl = tcrdm->getConnectionManager().getCacheImpl();
-  m_bucketWaitTimeout = cacheImpl->getDistributedSystem()
-                            .getSystemProperties()
-                            .bucketWaitTimeout();
+void ClientMetadataService::stop() {
+  m_run = false;
+  m_regionQueueCondition.notify_one();
+  m_thread.join();
 }
 
-int ClientMetadataService::svc() {
+void ClientMetadataService::svc() {
   DistributedSystemImpl::setThreadName(NC_CMDSvcThread);
+
   LOGINFO("ClientMetadataService started for pool " + m_pool->getName());
+
   while (m_run) {
-    m_regionQueueSema.acquire();
-    auto tcrdm = dynamic_cast<ThinClientPoolDM*>(m_pool);
-    auto&& cache = tcrdm->getConnectionManager().getCacheImpl();
-    while (true) {
-      auto&& regionFullPath = m_regionQueue.get();
-
-      if (regionFullPath) {
-        while (true) {
-          if (m_regionQueue.size() > 0) {
-            auto&& nextRegionFullPath = m_regionQueue.get();
-            if (nextRegionFullPath != nullptr &&
-                nextRegionFullPath->c_str() != nullptr &&
-                regionFullPath->compare(nextRegionFullPath->c_str()) == 0) {
-            } else {
-              // different region; put it back
-              m_regionQueue.put(nextRegionFullPath);
-              break;
-            }
-          } else {
-            break;
-          }
-        }
-      }
+    std::unique_lock<std::mutex> lock(m_regionQueueMutex);
+    m_regionQueueCondition.wait(
+        lock, [this] { return !m_run || !m_regionQueue.empty(); });
+    if (!m_run) {
+      break;
+    }
 
-      if (!cache->isCacheDestroyPending() && regionFullPath) {
-        getClientPRMetadata(regionFullPath->c_str());
-      } else {
-        break;
-      }
+    auto regionFullPath = std::move(m_regionQueue.front());
+    m_regionQueue.pop_front();
+    queue::coalesce(m_regionQueue, regionFullPath);
+
+    if (!m_cache->isCacheDestroyPending()) {
+      lock.unlock();
+      getClientPRMetadata(regionFullPath.c_str());
+    } else {
+      break;
     }
-    // while(m_regionQueueSema.tryacquire( ) != -1); // release all
   }
+
   LOGINFO("ClientMetadataService stopped for pool " + m_pool->getName());
-  return 0;
 }
 
 void ClientMetadataService::getClientPRMetadata(const char* regionFullPath) {
   if (regionFullPath == nullptr) return;
-  ThinClientPoolDM* tcrdm = dynamic_cast<ThinClientPoolDM*>(m_pool);
-  if (tcrdm == nullptr) {
-    throw IllegalArgumentException(
-        "ClientMetaData: pool cast to ThinClientPoolDM failed");
-  }
   // That means metadata for the region not found, So only for the first time
   // for a particular region use GetClientPartitionAttributesOp
   // TcrMessage to fetch the metadata and put it into map for later use.send
@@ -117,8 +98,9 @@ void ClientMetadataService::getClientPRMetadata(const char* regionFullPath) {
   std::string path(regionFullPath);
   std::shared_ptr<ClientMetadata> cptr = nullptr;
   {
-    ReadGuard guard(m_regionMetadataLock);
-    RegionMetadataMapType::iterator itr = m_regionMetaDataMap.find(path);
+    boost::shared_lock<decltype(m_regionMetadataLock)> lock(
+        m_regionMetadataLock);
+    const auto& itr = m_regionMetaDataMap.find(path);
     if (itr != m_regionMetaDataMap.end()) {
       cptr = itr->second;
     }
@@ -127,22 +109,20 @@ void ClientMetadataService::getClientPRMetadata(const char* regionFullPath) {
 
   if (cptr == nullptr) {
     TcrMessageGetClientPartitionAttributes request(
-        new DataOutput(tcrdm->getConnectionManager()
-                           .getCacheImpl()
-                           ->getCache()
-                           ->createDataOutput()),
-        regionFullPath);
-    GfErrType err = tcrdm->sendSyncRequest(request, reply);
+        new DataOutput(m_cache->createDataOutput(m_pool)), regionFullPath);
+    GfErrType err = m_pool->sendSyncRequest(request, reply);
     if (err == GF_NOERR &&
         reply.getMessageType() ==
             TcrMessage::RESPONSE_CLIENT_PARTITION_ATTRIBUTES) {
       cptr = std::make_shared<ClientMetadata>(reply.getNumBuckets(),
-                                              reply.getColocatedWith(), tcrdm,
+                                              reply.getColocatedWith(), m_pool,
                                               reply.getFpaSet());
       if (m_bucketWaitTimeout > std::chrono::milliseconds::zero() &&
           reply.getNumBuckets() > 0) {
-        WriteGuard guard(m_PRbucketStatusLock);
-        m_bucketStatus[regionFullPath] = new PRbuckets(reply.getNumBuckets());
+        boost::unique_lock<decltype(m_PRbucketStatusLock)> lock(
+            m_PRbucketStatusLock);
+        m_bucketStatus[regionFullPath] =
+            std::unique_ptr<PRbuckets>(new PRbuckets(reply.getNumBuckets()));
       }
       LOGDEBUG("ClientMetadata buckets %d ", reply.getNumBuckets());
     }
@@ -159,7 +139,8 @@ void ClientMetadataService::getClientPRMetadata(const char* regionFullPath) {
     if (newCptr != nullptr) {
       cptr->setPreviousone(nullptr);
       newCptr->setPreviousone(cptr);
-      WriteGuard guard(m_regionMetadataLock);
+      boost::unique_lock<decltype(m_regionMetadataLock)> lock(
+          m_regionMetadataLock);
       m_regionMetaDataMap[path] = newCptr;
       LOGINFO("Updated client meta data");
     }
@@ -170,51 +151,43 @@ void ClientMetadataService::getClientPRMetadata(const char* regionFullPath) {
       cptr->setPreviousone(nullptr);
       newCptr->setPreviousone(cptr);
       // now we will get new instance so assign it again
-      WriteGuard guard(m_regionMetadataLock);
+      boost::unique_lock<decltype(m_regionMetadataLock)> lock(
+          m_regionMetadataLock);
       m_regionMetaDataMap[colocatedWith.c_str()] = newCptr;
       m_regionMetaDataMap[path] = newCptr;
       LOGINFO("Updated client meta data");
     }
   }
 }
+
 std::shared_ptr<ClientMetadata> ClientMetadataService::SendClientPRMetadata(
     const char* regionPath, std::shared_ptr<ClientMetadata> cptr) {
-  ThinClientPoolDM* tcrdm = dynamic_cast<ThinClientPoolDM*>(m_pool);
-  if (tcrdm == nullptr) {
-    throw IllegalArgumentException(
-        "ClientMetaData: pool cast to ThinClientPoolDM failed");
-  }
   TcrMessageGetClientPrMetadata request(
-      new DataOutput(
-          tcrdm->getConnectionManager().getCacheImpl()->createDataOutput()),
-      regionPath);
+      new DataOutput(m_cache->createDataOutput(m_pool)), regionPath);
   TcrMessageReply reply(true, nullptr);
   // send this message to server and get metadata from server.
   LOGFINE("Now sending GET_CLIENT_PR_METADATA for getting from server: %s",
           regionPath);
   std::shared_ptr<Region> region = nullptr;
-  GfErrType err = tcrdm->sendSyncRequest(request, reply);
+  GfErrType err = m_pool->sendSyncRequest(request, reply);
   if (err == GF_NOERR &&
       reply.getMessageType() == TcrMessage::RESPONSE_CLIENT_PR_METADATA) {
-    region =
-        tcrdm->getConnectionManager().getCacheImpl()->getRegion(regionPath);
+    region = m_cache->getRegion(regionPath);
     if (region != nullptr) {
-      LocalRegion* lregion = dynamic_cast<LocalRegion*>(region.get());
-      lregion->getRegionStats()->incMetaDataRefreshCount();
+      if (auto lregion = std::dynamic_pointer_cast<LocalRegion>(region)) {
+        lregion->getRegionStats()->incMetaDataRefreshCount();
+      }
     }
-    std::vector<BucketServerLocationsType>* metadata = reply.getMetadata();
+    auto metadata = reply.getMetadata();
     if (metadata == nullptr) return nullptr;
     if (metadata->empty()) {
       delete metadata;
       return nullptr;
     }
     auto newCptr = std::make_shared<ClientMetadata>(*cptr);
-    for (std::vector<BucketServerLocationsType>::iterator iter =
-             metadata->begin();
-         iter != metadata->end(); ++iter) {
-      if (!(*iter).empty()) {
-        newCptr->updateBucketServerLocations((*iter).at(0)->getBucketId(),
-                                             (*iter));
+    for (const auto& v : *metadata) {
+      if (!v.empty()) {
+        newCptr->updateBucketServerLocations(v.at(0)->getBucketId(), v);
       }
     }
     delete metadata;
@@ -230,7 +203,8 @@ void ClientMetadataService::getBucketServerLocation(
     const std::shared_ptr<Serializable>& aCallbackArgument, bool isPrimary,
     std::shared_ptr<BucketServerLocation>& serverLocation, int8_t& version) {
   if (region != nullptr) {
-    ReadGuard guard(m_regionMetadataLock);
+    boost::shared_lock<decltype(m_regionMetadataLock)> lock(
+        m_regionMetadataLock);
     LOGDEBUG(
         "ClientMetadataService::getBucketServerLocation m_regionMetaDataMap "
         "size is %d",
@@ -277,35 +251,28 @@ void ClientMetadataService::getBucketServerLocation(
 
 std::shared_ptr<ClientMetadata> ClientMetadataService::getClientMetadata(
     const std::string& regionFullPath) {
-  ReadGuard guard(m_regionMetadataLock);
-  RegionMetadataMapType::iterator regionMetadataIter =
-      m_regionMetaDataMap.find(regionFullPath);
-  if (regionMetadataIter != m_regionMetaDataMap.end()) {
-    return (*regionMetadataIter).second;
+  boost::shared_lock<decltype(m_regionMetadataLock)> lock(m_regionMetadataLock);
+
+  const auto& entry = m_regionMetaDataMap.find(regionFullPath);
+  if (entry == m_regionMetaDataMap.end()) {
+    return nullptr;
   }
-  return nullptr;
+
+  return entry->second;
 }
 
-void ClientMetadataService::populateDummyServers(
-    const char* regionName, std::shared_ptr<ClientMetadata> cptr) {
-  WriteGuard guard(m_regionMetadataLock);
-  m_regionMetaDataMap[regionName] = cptr;
+std::shared_ptr<ClientMetadata> ClientMetadataService::getClientMetadata(
+    const std::shared_ptr<Region>& region) {
+  return getClientMetadata(region->getFullPath());
 }
 
 void ClientMetadataService::enqueueForMetadataRefresh(
     const std::string& regionFullPath, int8_t serverGroupFlag) {
-  ThinClientPoolDM* tcrdm = dynamic_cast<ThinClientPoolDM*>(m_pool);
-  if (tcrdm == nullptr) {
-    throw IllegalArgumentException(
-        "ClientMetaData: pool cast to ThinClientPoolDM failed");
-  }
-
-  auto cache = tcrdm->getConnectionManager().getCacheImpl();
-  auto region = cache->getRegion(regionFullPath);
+  auto region = m_cache->getRegion(regionFullPath);
 
-  std::string serverGroup = tcrdm->getServerGroup();
+  std::string serverGroup = m_pool->getServerGroup();
   if (serverGroup.length() != 0) {
-    cache->setServerGroupFlag(serverGroupFlag);
+    m_cache->setServerGroupFlag(serverGroupFlag);
     if (serverGroupFlag == 2) {
       LOGFINER(
           "Network hop but, from within same server-group, so no metadata "
@@ -315,7 +282,7 @@ void ClientMetadataService::enqueueForMetadataRefresh(
   }
 
   if (region != nullptr) {
-    ThinClientRegion* tcrRegion = dynamic_cast<ThinClientRegion*>(region.get());
+    auto tcrRegion = dynamic_cast<ThinClientRegion*>(region.get());
     {
       TryWriteGuard guardRegionMetaDataRefresh(
           tcrRegion->getMataDataMutex(), tcrRegion->getMetaDataRefreshed());
@@ -323,26 +290,16 @@ void ClientMetadataService::enqueueForMetadataRefresh(
         return;
       }
       LOGFINE("Network hop so fetching single hop metadata from the server");
-      cache->setNetworkHopFlag(true);
+      m_cache->setNetworkHopFlag(true);
       tcrRegion->setMetaDataRefreshed(true);
-      auto tempRegionPath = std::make_shared<std::string>(regionFullPath);
-      m_regionQueue.put(tempRegionPath);
-      m_regionQueueSema.release();
+      {
+        std::lock_guard<decltype(m_regionQueueMutex)> lock(m_regionQueueMutex);
+        m_regionQueue.push_back(regionFullPath);
+      }
+      m_regionQueueCondition.notify_one();
     }
   }
 }
-std::shared_ptr<ClientMetadata> ClientMetadataService::getClientMetadata(
-    const std::shared_ptr<Region>& region) {
-  ReadGuard guard(m_regionMetadataLock);
-
-  const auto& entry = m_regionMetaDataMap.find(region->getFullPath());
-
-  if (entry == m_regionMetaDataMap.end()) {
-    return nullptr;
-  }
-
-  return entry->second;
-}
 
 std::shared_ptr<ClientMetadataService::ServerToFilterMap>
 ClientMetadataService::getServerToFilterMap(
@@ -410,25 +367,25 @@ ClientMetadataService::getServerToFilterMap(
     keyList->push_back(key);
   }
 
-  if (keysWhichLeft.size() > 0 &&
-      serverToFilterMap->size() > 0) {  // add left keys in result
+  if (!keysWhichLeft.empty() && !serverToFilterMap->empty()) {
+    // add left keys in result
     auto keyLefts = keysWhichLeft.size();
     auto totalServers = serverToFilterMap->size();
     auto perServer = keyLefts / totalServers + 1;
 
     size_t keyIdx = 0;
     for (const auto& locationIter : *serverToFilterMap) {
-      const auto keys = locationIter.second;
+      const auto values = locationIter.second;
       for (size_t i = 0; i < perServer; i++) {
         if (keyIdx < keyLefts) {
-          keys->push_back(keysWhichLeft.at(keyIdx++));
+          values->push_back(keysWhichLeft.at(keyIdx++));
         } else {
           break;
         }
       }
       if (keyIdx >= keyLefts) break;  // done
     }
-  } else if (serverToFilterMap->size() == 0) {  // not be able to map any key
+  } else if (serverToFilterMap->empty()) {  // not be able to map any key
     return nullptr;  // it will force all keys to send to one server
   }
 
@@ -443,7 +400,7 @@ void ClientMetadataService::markPrimaryBucketForTimeout(
     std::shared_ptr<BucketServerLocation>& serverLocation, int8_t& version) {
   if (m_bucketWaitTimeout == std::chrono::milliseconds::zero()) return;
 
-  WriteGuard guard(m_PRbucketStatusLock);
+  boost::unique_lock<decltype(m_PRbucketStatusLock)> lock(m_PRbucketStatusLock);
 
   getBucketServerLocation(region, key, value, aCallbackArgument,
                           false /*look for secondary host*/, serverLocation,
@@ -455,8 +412,7 @@ void ClientMetadataService::markPrimaryBucketForTimeout(
              serverLocation->getPort());
     int32_t bId = serverLocation->getBucketId();
 
-    std::map<std::string, PRbuckets*>::iterator bs =
-        m_bucketStatus.find(region->getFullPath());
+    const auto& bs = m_bucketStatus.find(region->getFullPath());
 
     if (bs != m_bucketStatus.end()) {
       bs->second->setBucketTimeout(bId);
@@ -788,14 +744,12 @@ void ClientMetadataService::markPrimaryBucketForTimeoutButLookSecondaryBucket(
     std::shared_ptr<BucketServerLocation>& serverLocation, int8_t& version) {
   if (m_bucketWaitTimeout == std::chrono::milliseconds::zero()) return;
 
-  WriteGuard guard(m_PRbucketStatusLock);
-
-  std::map<std::string, PRbuckets*>::iterator bs =
-      m_bucketStatus.find(region->getFullPath());
+  boost::unique_lock<decltype(m_PRbucketStatusLock)> lock(m_PRbucketStatusLock);
 
   PRbuckets* prBuckets = nullptr;
+  const auto& bs = m_bucketStatus.find(region->getFullPath());
   if (bs != m_bucketStatus.end()) {
-    prBuckets = bs->second;
+    prBuckets = bs->second.get();
   }
 
   if (prBuckets == nullptr) return;
@@ -805,10 +759,10 @@ void ClientMetadataService::markPrimaryBucketForTimeoutButLookSecondaryBucket(
 
   std::shared_ptr<ClientMetadata> cptr = nullptr;
   {
-    ReadGuard guard(m_regionMetadataLock);
-    RegionMetadataMapType::iterator cptrIter =
-        m_regionMetaDataMap.find(region->getFullPath());
+    boost::shared_lock<decltype(m_regionMetadataLock)> lock(
+        m_regionMetadataLock);
 
+    const auto& cptrIter = m_regionMetaDataMap.find(region->getFullPath());
     if (cptrIter != m_regionMetaDataMap.end()) {
       cptr = cptrIter->second;
     }
@@ -820,9 +774,9 @@ void ClientMetadataService::markPrimaryBucketForTimeoutButLookSecondaryBucket(
 
   LOGFINE("Setting in markPrimaryBucketForTimeoutButLookSecondaryBucket");
 
-  int32_t totalBuckets = cptr->getTotalNumBuckets();
+  auto totalBuckets = cptr->getTotalNumBuckets();
 
-  for (int32_t i = 0; i < totalBuckets; i++) {
+  for (decltype(totalBuckets) i = 0; i < totalBuckets; i++) {
     int8_t version;
     std::shared_ptr<BucketServerLocation> bsl;
     cptr->getServerLocation(i, false, bsl, version);
@@ -840,15 +794,13 @@ bool ClientMetadataService::isBucketMarkedForTimeout(const char* regionFullPath,
                                                      int32_t bucketid) {
   if (m_bucketWaitTimeout == std::chrono::milliseconds::zero()) return false;
 
-  ReadGuard guard(m_PRbucketStatusLock);
+  boost::shared_lock<decltype(m_PRbucketStatusLock)> lock(m_PRbucketStatusLock);
 
   const auto& bs = m_bucketStatus.find(regionFullPath);
   if (bs != m_bucketStatus.end()) {
     bool m = bs->second->isBucketTimedOut(bucketid, m_bucketWaitTimeout);
     if (m) {
-      ThinClientPoolDM* tcrdm = dynamic_cast<ThinClientPoolDM*>(m_pool);
-      CacheImpl* cache = tcrdm->getConnectionManager().getCacheImpl();
-      cache->incBlackListBucketTimeouts();
+      m_cache->incBlackListBucketTimeouts();
     }
     LOGFINE("isBucketMarkedForTimeout:: for bucket %d returning = %d", bucketid,
             m);
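
The new svc() loop above reduces to a standard condition-variable consumer:
stop() flips the run flag and notifies so join() can return, the wait
predicate absorbs spurious wakeups, and adjacent duplicate requests are
coalesced. A self-contained sketch of that shape; queue::coalesce here is an
assumption about what the new cppcache/src/util/queue.hpp helper does, not a
copy of it:

    #include <atomic>
    #include <condition_variable>
    #include <deque>
    #include <iostream>
    #include <mutex>
    #include <string>
    #include <thread>
    #include <utility>

    namespace queue {
    // Assumed behavior: drop entries at the front equal to the value just
    // dequeued, collapsing duplicate refresh requests.
    template <typename Container, typename Value>
    void coalesce(Container& container, const Value& value) {
      while (!container.empty() && container.front() == value) {
        container.pop_front();
      }
    }
    }  // namespace queue

    class MetadataWorker {
     public:
      void start() {
        run_ = true;
        thread_ = std::thread(&MetadataWorker::svc, this);
      }

      void stop() {
        run_ = false;
        condition_.notify_one();  // wake the worker so join() can finish
        thread_.join();
      }

      void enqueue(std::string path) {
        {
          std::lock_guard<std::mutex> lock(mutex_);
          queue_.push_back(std::move(path));
        }
        condition_.notify_one();
      }

     private:
      void svc() {
        while (run_) {
          std::unique_lock<std::mutex> lock(mutex_);
          // The predicate guards against spurious wakeups.
          condition_.wait(lock, [this] { return !run_ || !queue_.empty(); });
          if (!run_) break;
          auto path = std::move(queue_.front());
          queue_.pop_front();
          queue::coalesce(queue_, path);
          lock.unlock();
          std::cout << "refresh metadata for " << path << "\n";  // stand-in work
        }
      }

      std::atomic<bool> run_{false};
      std::thread thread_;
      std::deque<std::string> queue_;
      std::mutex mutex_;
      std::condition_variable condition_;
    };

    int main() {
      MetadataWorker worker;
      worker.start();
      worker.enqueue("/Root1");
      worker.enqueue("/Root1");  // coalesced with the first
      worker.stop();
    }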
diff --git a/cppcache/src/ClientMetadataService.hpp b/cppcache/src/ClientMetadataService.hpp
index 6411265..5de4379 100644
--- a/cppcache/src/ClientMetadataService.hpp
+++ b/cppcache/src/ClientMetadataService.hpp
@@ -20,13 +20,17 @@
 #ifndef GEODE_CLIENTMETADATASERVICE_H_
 #define GEODE_CLIENTMETADATASERVICE_H_
 
+#include <atomic>
 #include <chrono>
+#include <condition_variable>
+#include <deque>
 #include <memory>
+#include <mutex>
 #include <string>
+#include <thread>
 #include <unordered_map>
 
-#include <ace/Semaphore.h>
-#include <ace/Task.h>
+#include <boost/thread/shared_mutex.hpp>
 
 #include <geode/CacheableKey.hpp>
 #include <geode/Region.hpp>
@@ -34,17 +38,15 @@
 #include <geode/internal/functional.hpp>
 
 #include "BucketServerLocation.hpp"
-#include "ClientMetadata.hpp"
-#include "DistributedSystemImpl.hpp"
 #include "NonCopyable.hpp"
-#include "Queue.hpp"
 #include "ServerLocation.hpp"
 
 namespace apache {
 namespace geode {
 namespace client {
 
-class ClienMetadata;
+class ClientMetadata;
+class ThinClientPoolDM;
 
 typedef std::map<std::string, std::shared_ptr<ClientMetadata>>
     RegionMetadataMapType;
@@ -96,25 +98,17 @@ class PRbuckets {
   void setBucketTimeout(int32_t bucketId) { m_buckets[bucketId].setTimeout(); }
 };
 
-class ClientMetadataService : public ACE_Task_Base,
-                              private NonCopyable,
-                              private NonAssignable {
+class ClientMetadataService : private NonCopyable, private NonAssignable {
  public:
-  ~ClientMetadataService();
-  explicit ClientMetadataService(Pool* pool);
+  ClientMetadataService() = delete;
+  explicit ClientMetadataService(ThinClientPoolDM* pool);
+  inline ~ClientMetadataService() noexcept = default;
 
-  inline void start() {
-    m_run = true;
-    this->activate();
-  }
+  void start();
 
-  inline void stop() {
-    m_run = false;
-    m_regionQueueSema.release();
-    this->wait();
-  }
+  void stop();
 
-  int svc(void);
+  void svc(void);
 
   void getClientPRMetadata(const char* regionFullPath);
 
@@ -125,14 +119,9 @@ class ClientMetadataService : public ACE_Task_Base,
       const std::shared_ptr<Serializable>& aCallbackArgument, bool isPrimary,
       std::shared_ptr<BucketServerLocation>& serverLocation, int8_t& version);
 
-  void removeBucketServerLocation(BucketServerLocation serverLocation);
-
   std::shared_ptr<ClientMetadata> getClientMetadata(
       const std::string& regionFullPath);
 
-  void populateDummyServers(const char* regionName,
-                            std::shared_ptr<ClientMetadata> clientmetadata);
-
   void enqueueForMetadataRefresh(const std::string& regionFullPath,
                                  int8_t serverGroupFlag);
 
@@ -168,8 +157,6 @@ class ClientMetadataService : public ACE_Task_Base,
       dereference_hash<std::shared_ptr<BucketServerLocation>>,
       dereference_equal_to<std::shared_ptr<BucketServerLocation>>>
       ServerToBucketsMap;
-  // bool AreBucketSetsEqual(const BucketSet& currentBucketSet,
-  //                        const BucketSet& bucketSet);
 
   std::shared_ptr<BucketServerLocation> findNextServer(
       const ServerToBucketsMap& serverToBucketsMap,
@@ -204,13 +191,6 @@ class ClientMetadataService : public ACE_Task_Base,
       const BucketSet& buckets);
 
  private:
-  // const std::shared_ptr<PartitionResolver>& getResolver(const
-  // std::shared_ptr<Region>& region, const std::shared_ptr<CacheableKey>& key,
-  // const std::shared_ptr<Serializable>& aCallbackArgument);
-
-  // BucketServerLocation getServerLocation(std::shared_ptr<ClientMetadata>
-  // cptr, int bucketId, bool isPrimary);
-
   std::shared_ptr<ClientMetadata> SendClientPRMetadata(
       const char* regionPath, std::shared_ptr<ClientMetadata> cptr);
 
@@ -218,16 +198,17 @@ class ClientMetadataService : public ACE_Task_Base,
       const std::shared_ptr<Region>& region);
 
  private:
-  ACE_RW_Thread_Mutex m_regionMetadataLock;
-  ClientMetadataService();
-  ACE_Semaphore m_regionQueueSema;
+  std::thread m_thread;
+  boost::shared_mutex m_regionMetadataLock;
   RegionMetadataMapType m_regionMetaDataMap;
-  volatile bool m_run;
-  Pool* m_pool;
-  Queue<std::shared_ptr<std::string>> m_regionQueue;
-
-  ACE_RW_Thread_Mutex m_PRbucketStatusLock;
-  std::map<std::string, PRbuckets*> m_bucketStatus;
+  std::atomic<bool> m_run;
+  ThinClientPoolDM* m_pool;
+  CacheImpl* m_cache;
+  std::deque<std::string> m_regionQueue;
+  std::mutex m_regionQueueMutex;
+  std::condition_variable m_regionQueueCondition;
+  boost::shared_mutex m_PRbucketStatusLock;
+  std::map<std::string, std::unique_ptr<PRbuckets>> m_bucketStatus;
   std::chrono::milliseconds m_bucketWaitTimeout;
   static const char* NC_CMDSvcThread;
 };
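
A note on the locking change above: ACE_RW_Thread_Mutex becomes boost::shared_mutex, so metadata lookups can take a shared (reader) lock concurrently while refreshes take the mutex exclusively. A minimal self-contained sketch of that reader/writer idiom, with placeholder names rather than the class's real members:

    #include <map>
    #include <memory>
    #include <string>

    #include <boost/thread/lock_types.hpp>
    #include <boost/thread/shared_mutex.hpp>

    class MetadataCache {
     public:
      // Readers may hold the shared lock concurrently.
      std::shared_ptr<std::string> find(const std::string& region) const {
        boost::shared_lock<boost::shared_mutex> lock(mutex_);
        auto entry = map_.find(region);
        return entry == map_.end() ? nullptr : entry->second;
      }

      // Writers take the mutex exclusively, blocking all readers.
      void update(const std::string& region,
                  std::shared_ptr<std::string> metadata) {
        boost::unique_lock<boost::shared_mutex> lock(mutex_);
        map_[region] = std::move(metadata);
      }

     private:
      mutable boost::shared_mutex mutex_;
      std::map<std::string, std::shared_ptr<std::string>> map_;
    };
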
diff --git a/cppcache/src/ClientReplacementRequest.cpp b/cppcache/src/ClientReplacementRequest.cpp
index 953cc50..64a565f 100644
--- a/cppcache/src/ClientReplacementRequest.cpp
+++ b/cppcache/src/ClientReplacementRequest.cpp
@@ -29,8 +29,8 @@ void ClientReplacementRequest::toData(DataOutput& output) const {
   this->m_serverLocation.toData(output);
 }
 
-DSFid ClientReplacementRequest::getDSFID() const {
-  return DSFid::ClientReplacementRequest;
+internal::DSFid ClientReplacementRequest::getDSFID() const {
+  return internal::DSFid::ClientReplacementRequest;
 }
 
 }  // namespace client
diff --git a/cppcache/src/ClientReplacementRequest.hpp b/cppcache/src/ClientReplacementRequest.hpp
index a873455..e712284 100644
--- a/cppcache/src/ClientReplacementRequest.hpp
+++ b/cppcache/src/ClientReplacementRequest.hpp
@@ -23,10 +23,11 @@
 #include <set>
 #include <string>
 
+#include <geode/internal/DataSerializableFixedId.hpp>
+
 #include "ClientConnectionRequest.hpp"
 #include "ServerLocation.hpp"
 #include "ServerLocationRequest.hpp"
-#include "TcrEndpoint.hpp"
 
 namespace apache {
 namespace geode {
@@ -43,7 +44,7 @@ class ClientReplacementRequest : public ClientConnectionRequest {
   ~ClientReplacementRequest() override = default;
 
   void toData(DataOutput& output) const override;
-  DSFid getDSFID() const override;
+  internal::DSFid getDSFID() const override;
 
  private:
   const ServerLocation m_serverLocation;
diff --git a/cppcache/src/CqEventImpl.cpp b/cppcache/src/CqEventImpl.cpp
index eb32e7e..ada5619 100644
--- a/cppcache/src/CqEventImpl.cpp
+++ b/cppcache/src/CqEventImpl.cpp
@@ -14,10 +14,12 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 #include "CqEventImpl.hpp"
 
 #include <geode/CacheableString.hpp>
 
+#include "TcrConnectionManager.hpp"
 #include "TcrMessage.hpp"
 #include "ThinClientCacheDistributionManager.hpp"
 #include "ThinClientPoolHADM.hpp"
diff --git a/cppcache/src/CqQueryImpl.cpp b/cppcache/src/CqQueryImpl.cpp
index 1389d90..a7eadf8 100644
--- a/cppcache/src/CqQueryImpl.cpp
+++ b/cppcache/src/CqQueryImpl.cpp
@@ -23,6 +23,7 @@
 
 #include "ResultSetImpl.hpp"
 #include "StructSetImpl.hpp"
+#include "TcrConnectionManager.hpp"
 #include "ThinClientRegion.hpp"
 #include "UserAttributes.hpp"
 #include "util/Log.hpp"
diff --git a/cppcache/src/CqQueryImpl.hpp b/cppcache/src/CqQueryImpl.hpp
index f03ddd7..3c1b246 100644
--- a/cppcache/src/CqQueryImpl.hpp
+++ b/cppcache/src/CqQueryImpl.hpp
@@ -34,7 +34,6 @@
 
 #include "CqQueryVsdStats.hpp"
 #include "CqService.hpp"
-#include "MapWithLock.hpp"
 
 /**
  * @file
diff --git a/cppcache/src/CqService.cpp b/cppcache/src/CqService.cpp
index 073db46..e872554 100644
--- a/cppcache/src/CqService.cpp
+++ b/cppcache/src/CqService.cpp
@@ -28,6 +28,7 @@
 #include "CqQueryImpl.hpp"
 #include "DistributedSystem.hpp"
 #include "ReadWriteLock.hpp"
+#include "TcrConnectionManager.hpp"
 #include "ThinClientPoolDM.hpp"
 #include "util/exception.hpp"
 
@@ -41,14 +42,10 @@ CqService::CqService(ThinClientBaseDM* tccdm,
       m_statisticsFactory(statisticsFactory),
       m_notificationSema(1),
       m_stats(std::make_shared<CqServiceVsdStats>(m_statisticsFactory)) {
-  m_cqQueryMap = new MapOfCqQueryWithLock();
   m_running = true;
   LOGDEBUG("CqService Started");
 }
-CqService::~CqService() {
-  if (m_cqQueryMap != nullptr) delete m_cqQueryMap;
-  LOGDEBUG("CqService Destroyed");
-}
+CqService::~CqService() noexcept { LOGDEBUG("CqService Destroyed"); }
 
 void CqService::updateStats() {
   auto stats = std::dynamic_pointer_cast<CqServiceVsdStats>(m_stats);
@@ -56,14 +53,14 @@ void CqService::updateStats() {
   stats->setNumCqsActive(0);
   stats->setNumCqsStopped(0);
 
-  MapOfRegionGuard guard(m_cqQueryMap->mutex());
+  auto&& lock = m_cqQueryMap.make_lock();
 
-  stats->setNumCqsOnClient(static_cast<uint32_t>(m_cqQueryMap->current_size()));
+  stats->setNumCqsOnClient(static_cast<uint32_t>(m_cqQueryMap.size()));
 
-  if (m_cqQueryMap->current_size() == 0) return;
+  if (m_cqQueryMap.empty()) return;
 
-  for (auto q = m_cqQueryMap->begin(); q != m_cqQueryMap->end(); ++q) {
-    auto cquery = ((*q).int_id_);
+  for (const auto& kv : m_cqQueryMap) {
+    auto& cquery = kv.second;
     switch (cquery->getState()) {
       case CqState::RUNNING:
         stats->incNumCqsActive();
@@ -143,15 +140,9 @@ std::shared_ptr<CqQuery> CqService::newCq(
  * Adds the given CQ and cqQuery object into the CQ map.
  */
 void CqService::addCq(const std::string& cqName, std::shared_ptr<CqQuery>& cq) {
-  try {
-    MapOfRegionGuard guard(m_cqQueryMap->mutex());
-    std::shared_ptr<CqQuery> tmp;
-    if (0 == m_cqQueryMap->find(cqName, tmp)) {
-      throw CqExistsException("CQ with given name already exists. ");
-    }
-    m_cqQueryMap->bind(cqName, cq);
-  } catch (Exception& e) {
-    throw e;
+  auto result = m_cqQueryMap.emplace(cqName, cq);
+  if (!result.second) {
+    throw CqExistsException("CQ with given name already exists. ");
   }
 }
 
@@ -159,12 +150,7 @@ void CqService::addCq(const std::string& cqName, std::shared_ptr<CqQuery>& cq) {
  * Removes given CQ from the cqMap..
  */
 void CqService::removeCq(const std::string& cqName) {
-  try {
-    MapOfRegionGuard guard(m_cqQueryMap->mutex());
-    m_cqQueryMap->unbind(cqName);
-  } catch (Exception& e) {
-    throw e;
-  }
+  m_cqQueryMap.erase(cqName);
 }
 
 /**
@@ -172,12 +158,12 @@ void CqService::removeCq(const std::string& cqName) {
  * @return the CqQuery or null if not found
  */
 std::shared_ptr<CqQuery> CqService::getCq(const std::string& cqName) {
-  MapOfRegionGuard guard(m_cqQueryMap->mutex());
-  std::shared_ptr<CqQuery> tmp;
-  if (0 != m_cqQueryMap->find(cqName, tmp)) {
+  auto&& lock = m_cqQueryMap.make_lock();
+  const auto& found = m_cqQueryMap.find(cqName);
+  if (found == m_cqQueryMap.end()) {
     LOGWARN("Failed to get the specified CQ: %s", cqName.c_str());
   } else {
-    return tmp;
+    return found->second;
   }
   return nullptr;
 }
@@ -187,12 +173,7 @@ std::shared_ptr<CqQuery> CqService::getCq(const std::string& cqName) {
  */
 void CqService::clearCqQueryMap() {
   Log::fine("Cleaning clearCqQueryMap.");
-  try {
-    MapOfRegionGuard guard(m_cqQueryMap->mutex());
-    m_cqQueryMap->unbind_all();
-  } catch (Exception& e) {
-    throw e;
-  }
+  m_cqQueryMap.clear();
 }
 
 /**
@@ -200,11 +181,12 @@ void CqService::clearCqQueryMap() {
  */
 CqService::query_container_type CqService::getAllCqs() {
   CqService::query_container_type cqVec;
-  MapOfRegionGuard guard(m_cqQueryMap->mutex());
-  if (m_cqQueryMap->current_size() == 0) return cqVec;
-  cqVec.reserve(static_cast<int32_t>(m_cqQueryMap->current_size()));
-  for (auto& q : *m_cqQueryMap) {
-    cqVec.push_back(q.int_id_);
+  auto&& lock = m_cqQueryMap.make_lock();
+  if (!m_cqQueryMap.empty()) {
+    cqVec.reserve(m_cqQueryMap.size());
+    for (auto& kv : m_cqQueryMap) {
+      cqVec.push_back(kv.second);
+    }
   }
   return cqVec;
 }
@@ -368,7 +350,7 @@ void CqService::closeAllCqs() {
   Log::fine("closeAllCqs()");
   query_container_type cqVec = getAllCqs();
   Log::fine("closeAllCqs() 1");
-  MapOfRegionGuard guard(m_cqQueryMap->mutex());
+  auto&& lock = m_cqQueryMap.make_lock();
   Log::fine("closeAllCqs() 2");
   closeCqs(cqVec);
 }
@@ -393,16 +375,9 @@ void CqService::cleanup() {
  * @return true if exists else false.
  */
 bool CqService::isCqExists(const std::string& cqName) {
-  bool status = false;
-  try {
-    MapOfRegionGuard guard(m_cqQueryMap->mutex());
-    std::shared_ptr<CqQuery> tmp;
-    status = (0 == m_cqQueryMap->find(cqName, tmp));
-  } catch (Exception& ex) {
-    LOGFINE("Exception (%s) in isCQExists, ignored ",
-            ex.what());  // Ignore.
-  }
-  return status;
+  auto&& lock = m_cqQueryMap.make_lock();
+
+  return m_cqQueryMap.find(cqName) != m_cqQueryMap.end();
 }
 void CqService::receiveNotification(TcrMessage* msg) {
   invokeCqListeners(msg->getCqs(), msg->getMessageTypeForCq(), msg->getKey(),
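
The synchronized_map wrapper used above lives in util/synchronized_map.hpp, which is outside this excerpt. The following is only a sketch consistent with the call sites in this commit — self-locking single operations such as emplace/erase/empty, plus make_lock() for bracketing find() and iteration — not the actual implementation:

    #include <mutex>
    #include <utility>

    template <typename Map, typename Mutex = std::mutex>
    class synchronized_map_sketch {
     public:
      using key_type = typename Map::key_type;

      // Guards multi-step operations: find(), iteration, etc. The braced
      // return initializes the guard in place, so make_lock<std::lock_guard>()
      // works even though lock_guard is not movable.
      template <template <typename> class Guard = std::unique_lock>
      Guard<Mutex> make_lock() const {
        mutex_.lock();
        return {mutex_, std::adopt_lock};
      }

      // Single-call operations lock internally.
      template <typename... Args>
      std::pair<typename Map::iterator, bool> emplace(Args&&... args) {
        std::lock_guard<Mutex> lock(mutex_);
        return map_.emplace(std::forward<Args>(args)...);
      }

      typename Map::size_type erase(const key_type& key) {
        std::lock_guard<Mutex> lock(mutex_);
        return map_.erase(key);
      }

      bool empty() const {
        std::lock_guard<Mutex> lock(mutex_);
        return map_.empty();
      }

      // Unsynchronized; callers hold make_lock() across these.
      typename Map::iterator find(const key_type& key) { return map_.find(key); }
      typename Map::iterator begin() { return map_.begin(); }
      typename Map::iterator end() { return map_.end(); }

     private:
      mutable Mutex mutex_;
      Map map_;
    };
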
diff --git a/cppcache/src/CqService.hpp b/cppcache/src/CqService.hpp
index 8d711be..5fed8de 100644
--- a/cppcache/src/CqService.hpp
+++ b/cppcache/src/CqService.hpp
@@ -33,27 +33,25 @@
 
 #include "CqServiceVsdStats.hpp"
 #include "DistributedSystem.hpp"
-#include "MapWithLock.hpp"
+#include "ErrType.hpp"
 #include "NonCopyable.hpp"
 #include "Queue.hpp"
 #include "TcrMessage.hpp"
-#include "ThinClientBaseDM.hpp"
-
-/**
- * @file
- */
+#include "util/synchronized_map.hpp"
 
 namespace apache {
 namespace geode {
 namespace client {
 
+class ThinClientBaseDM;
+class TcrEndpoint;
+
 /**
  * @class CqService CqService.hpp
  *
  * Implements the CqService functionality.
  *
  */
-
 class APACHE_GEODE_EXPORT CqService
     : private NonCopyable,
       private NonAssignable,
@@ -64,14 +62,13 @@ class APACHE_GEODE_EXPORT CqService
   ACE_Semaphore m_notificationSema;
 
   bool m_running;
-  MapOfCqQueryWithLock* m_cqQueryMap;
+  synchronized_map<std::unordered_map<std::string, std::shared_ptr<CqQuery>>,
+                   std::recursive_mutex>
+      m_cqQueryMap;
 
   std::shared_ptr<CqServiceStatistics> m_stats;
 
-  inline bool noCq() const {
-    MapOfRegionGuard guard(m_cqQueryMap->mutex());
-    return (0 == m_cqQueryMap->current_size());
-  }
+  inline bool noCq() const { return m_cqQueryMap.empty(); }
 
  public:
   typedef std::vector<std::shared_ptr<CqQuery>> query_container_type;
@@ -81,10 +78,11 @@ class APACHE_GEODE_EXPORT CqService
    */
   CqService(ThinClientBaseDM* tccdm,
             statistics::StatisticsFactory* statisticsFactory);
+  ~CqService() noexcept;
+
   ThinClientBaseDM* getDM() { return m_tccdm; }
 
   void receiveNotification(TcrMessage* msg);
-  ~CqService();
 
   /**
    * Returns the state of the cqService.
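
The ThinClientBaseDM and TcrEndpoint forward declarations above stand in for whole-header includes — the commit's "cleanup includes for fast compilation" bullet at work. A header that only stores or passes pointers needs just the class name; only the .cpp that calls through the pointer needs the full definition. The general shape, with hypothetical names:

    // widget_user.hpp -- hypothetical illustration of the pattern
    class Widget;  // forward declaration; no #include required

    class WidgetUser {
     public:
      explicit WidgetUser(Widget* widget) : widget_(widget) {}
      Widget* getWidget() { return widget_; }  // fine with a declaration only
      void run();  // body needs Widget's full type, so it lives in the .cpp

     private:
      Widget* widget_;
    };

    // widget_user.cpp would then do:
    //   #include "widget_user.hpp"
    //   #include "widget.hpp"  // full definition needed to call methods
    //   void WidgetUser::run() { widget_->doWork(); }
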
diff --git a/cppcache/src/DistributedSystemImpl.cpp b/cppcache/src/DistributedSystemImpl.cpp
index 836f10d..a648232 100644
--- a/cppcache/src/DistributedSystemImpl.cpp
+++ b/cppcache/src/DistributedSystemImpl.cpp
@@ -17,6 +17,12 @@
 
 #include "DistributedSystemImpl.hpp"
 
+#if defined(HAVE_PTHREAD_H)
+#include <pthread.h>
+#elif defined(_WIN32)
+#include <windows.h>
+#endif
+
 #include <boost/filesystem.hpp>
 
 #include <geode/SystemProperties.hpp>
@@ -141,6 +147,43 @@ void DistributedSystemImpl::unregisterCliCallback(int appdomainId) {
   }
 }
 
+void DistributedSystemImpl::setThreadName(const std::string& threadName) {
+  if (threadName.empty()) {
+    throw IllegalArgumentException("Thread name is empty.");
+  }
+
+#if defined(HAVE_pthread_setname_np)
+
+  pthread_setname_np(threadName.c_str());
+
+#elif defined(_WIN32)
+
+  const DWORD MS_VC_EXCEPTION = 0x406D1388;
+
+#pragma pack(push, 8)
+  typedef struct tagTHREADNAME_INFO {
+    DWORD dwType;      // Must be 0x1000.
+    LPCSTR szName;     // Pointer to name (in user addr space).
+    DWORD dwThreadID;  // Thread ID (-1=caller thread).
+    DWORD dwFlags;     // Reserved for future use, must be zero.
+  } THREADNAME_INFO;
+#pragma pack(pop)
+
+  THREADNAME_INFO info;
+  info.dwType = 0x1000;
+  info.szName = threadName.c_str();
+  info.dwThreadID = -1;
+  info.dwFlags = 0;
+
+  __try {
+    RaiseException(MS_VC_EXCEPTION, 0, sizeof(info) / sizeof(ULONG_PTR),
+                   (ULONG_PTR*)&info);
+  } __except (EXCEPTION_EXECUTE_HANDLER) {
+  }
+
+#endif
+}
+
 }  // namespace client
 }  // namespace geode
 }  // namespace apache
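
One portability note on the new setThreadName(): the single-argument pthread_setname_np() used here is the macOS form, which names only the calling thread, so the configure-time HAVE_pthread_setname_np probe presumably distinguishes it from glibc's two-argument variant. A hedged sketch of that signature difference (not part of this commit):

    #include <pthread.h>
    #include <string>

    void setCurrentThreadName(const std::string& name) {
    #if defined(__APPLE__)
      // macOS: names the calling thread only.
      pthread_setname_np(name.c_str());
    #elif defined(__linux__)
      // glibc: takes a thread handle; names are capped at 15 chars + NUL.
      pthread_setname_np(pthread_self(), name.c_str());
    #endif
    }
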
diff --git a/cppcache/src/DistributedSystemImpl.hpp b/cppcache/src/DistributedSystemImpl.hpp
index 85f5102..ff3e2cc 100644
--- a/cppcache/src/DistributedSystemImpl.hpp
+++ b/cppcache/src/DistributedSystemImpl.hpp
@@ -20,10 +20,6 @@
 #ifndef GEODE_DISTRIBUTEDSYSTEMIMPL_H_
 #define GEODE_DISTRIBUTEDSYSTEMIMPL_H_
 
-/**
- * @file
- */
-
 #include <map>
 #include <memory>
 #include <mutex>
@@ -35,38 +31,24 @@
 #include "DistributedSystem.hpp"
 #include "statistics/StatisticsManager.hpp"
 
-#ifdef __linux
-#include <sys/prctl.h>
-#endif
-
 namespace apache {
 namespace geode {
 namespace client {
+
 class SystemProperties;
 
+class DistributedSystemImpl;
+
+using CliCallbackMethod = std::function<void(Cache&)>;
+
 /**
  * @class DistributedSystemImpl DistributedSystemImpl.hpp
  * A "connection" to a Geode distributed system.
  * The connection will be through a (host, port) pair.
  */
-
-class DistributedSystemImpl;
-
-using CliCallbackMethod = std::function<void(Cache&)>;
-
 class APACHE_GEODE_EXPORT DistributedSystemImpl {
-  /**
-   * @brief public methods
-   */
  public:
-  static void setThreadName(const std::string& threadName) {
-    if (threadName.empty()) {
-      throw IllegalArgumentException("Thread name is empty.");
-    }
-#ifdef __linux
-    prctl(PR_SET_NAME, threadName.c_str(), 0, 0, 0);
-#endif
-  }
+  static void setThreadName(const std::string& threadName);
 
   /**
    * @brief destructor
@@ -110,6 +92,7 @@ class APACHE_GEODE_EXPORT DistributedSystemImpl {
   std::unique_ptr<SystemProperties> m_sysProps;
   bool m_connected;
 };
+
 }  // namespace client
 }  // namespace geode
 }  // namespace apache
diff --git a/cppcache/src/EvictionController.cpp b/cppcache/src/EvictionController.cpp
index d2c366b..10b4a1c 100644
--- a/cppcache/src/EvictionController.cpp
+++ b/cppcache/src/EvictionController.cpp
@@ -18,13 +18,15 @@
 #include "EvictionController.hpp"
 
 #include <chrono>
-#include <string>
+
+#include <boost/thread/lock_types.hpp>
 
 #include "CacheImpl.hpp"
 #include "CacheRegionHelper.hpp"
 #include "DistributedSystem.hpp"
 #include "ReadWriteLock.hpp"
 #include "RegionInternal.hpp"
+#include "util/Log.hpp"
 
 namespace apache {
 namespace geode {
@@ -37,43 +39,59 @@ EvictionController::EvictionController(size_t maxHeapSize,
       m_maxHeapSize(maxHeapSize * 1024 * 1024),
       m_heapSizeDelta(heapSizeDelta),
       m_cacheImpl(cache),
-      m_currentHeapSize(0) {
-  evictionThreadPtr = new EvictionThread(this);
+      m_currentHeapSize(0),
+      m_evictionThread(this) {
   LOGINFO("Maximum heap size for Heap LRU set to %ld bytes", m_maxHeapSize);
-  //  m_currentHeapSize =
-  //  DistributedSystem::getSystemProperties()->gfHighWaterMark(),
-  //  DistributedSystem::getSystemProperties()->gfMessageSize();
 }
 
-EvictionController::~EvictionController() {
-  _GEODE_SAFE_DELETE(evictionThreadPtr);
+void EvictionController::start() {
+  m_evictionThread.start();
+
+  m_run = true;
+  m_thread = std::thread(&EvictionController::svc, this);
+
+  LOGFINE("Eviction Controller started");
 }
 
-void EvictionController::updateRegionHeapInfo(int64_t info) {
-  // LOGINFO("updateRegionHeapInfo is %d", info);
-  m_queue.put(info);
-  // We could block here if we wanted to prevent any further memory use
-  // until the evictions had been completed.
+void EvictionController::stop() {
+  m_run = false;
+  m_queueCondition.notify_one();
+  m_thread.join();
+
+  m_evictionThread.stop();
+
+  m_regions.clear();
+  m_queue.clear();
+
+  LOGFINE("Eviction controller stopped");
 }
 
-int EvictionController::svc() {
+void EvictionController::svc() {
   DistributedSystemImpl::setThreadName(NC_EC_Thread);
+
   int64_t pendingEvictions = 0;
-  while (m_run) {
-    int64_t readInfo = 0;
-    readInfo = m_queue.getFor(std::chrono::microseconds(1500));
-    if (readInfo == 0) continue;
 
-    processHeapInfo(readInfo, pendingEvictions);
-  }
-  auto size = m_queue.size();
-  for (decltype(size) i = 0; i < size; i++) {
-    int64_t readInfo = 0;
-    readInfo = m_queue.get();
-    if (readInfo == 0) continue;
-    processHeapInfo(readInfo, pendingEvictions);
+  while (m_run) {
+    std::unique_lock<std::mutex> lock(m_queueMutex);
+    m_queueCondition.wait(lock, [this] { return !m_run || !m_queue.empty(); });
+
+    while (!m_queue.empty()) {
+      auto readInfo = m_queue.front();
+      m_queue.pop_front();
+      if (0 != readInfo) {
+        processHeapInfo(readInfo, pendingEvictions);
+      }
+    }
   }
-  return 1;
+}
+
+void EvictionController::updateRegionHeapInfo(int64_t info) {
+  std::unique_lock<std::mutex> lock(m_queueMutex);
+  m_queue.push_back(info);
+  m_queueCondition.notify_one();
+
+  // We could block here if we wanted to prevent any further memory use
+  // until the evictions had been completed.
 }
 
 void EvictionController::processHeapInfo(int64_t& readInfo,
@@ -114,31 +132,29 @@ void EvictionController::processHeapInfo(int64_t& readInfo,
   }
 }
 
-void EvictionController::registerRegion(std::string& name) {
-  WriteGuard guard(m_regionLock);
+void EvictionController::registerRegion(const std::string& name) {
+  boost::unique_lock<decltype(m_regionLock)> lock(m_regionLock);
   m_regions.push_back(name);
-  LOGFINE("Registered region with Heap LRU eviction controller: name is %s",
-          name.c_str());
+  LOGFINE("Registered region with Heap LRU eviction controller: name is " +
+          name);
 }
 
-void EvictionController::deregisterRegion(std::string& name) {
+void EvictionController::deregisterRegion(const std::string& name) {
   // Iterate over regions vector and remove the one that we need to remove
-  WriteGuard guard(m_regionLock);
-  for (size_t i = 0; i < m_regions.size(); i++) {
-    std::string str = m_regions.at(i);
-    if (str == name) {
-      std::vector<std::string>::iterator iter = m_regions.begin();
-      m_regions.erase(iter + i);
-      LOGFINE(
-          "Deregistered region with Heap LRU eviction controller: name is %s",
-          name.c_str());
-      break;
-    }
+  boost::unique_lock<decltype(m_regionLock)> lock(m_regionLock);
+
+  const auto& removed =
+      std::remove_if(m_regions.begin(), m_regions.end(),
+                     [&](const std::string& region) { return region == name; });
+  if (removed != m_regions.cend()) {
+    LOGFINE("Deregistered region with Heap LRU eviction controller: name is " +
+            name);
   }
+  m_regions.erase(removed, m_regions.cend());
 }
 
 void EvictionController::orderEvictions(int32_t percentage) {
-  evictionThreadPtr->putEvictionInfo(percentage);
+  m_evictionThread.putEvictionInfo(percentage);
 }
 
 void EvictionController::evict(int32_t percentage) {
@@ -150,25 +166,23 @@ void EvictionController::evict(int32_t percentage) {
   // On the flip side, this requires a copy of the registered region list
   // every time eviction is ordered and that might not be cheap
   //@TODO: Discuss with team
-  VectorOfString regionTempVector;
+
+  decltype(m_regions) regionTempVector;
   {
-    ReadGuard guard(m_regionLock);
-    for (size_t i = 0; i < m_regions.size(); i++) {
-      regionTempVector.push_back(m_regions.at(i));
-    }
+    boost::shared_lock<decltype(m_regionLock)> lock(m_regionLock);
+    regionTempVector.reserve(m_regions.size());
+    regionTempVector.insert(regionTempVector.end(), m_regions.begin(),
+                            m_regions.end());
   }
 
-  for (size_t i = 0; i < regionTempVector.size(); i++) {
-    std::string region_name = regionTempVector.at(i);
-    auto region = m_cacheImpl->getRegion(region_name);
-    if (region != nullptr) {
-      RegionInternal* regionImpl = dynamic_cast<RegionInternal*>(region.get());
-      if (regionImpl != nullptr) {
-        regionImpl->evict(percentage);
-      }
+  for (const auto& regionName : regionTempVector) {
+    if (auto region = std::dynamic_pointer_cast<RegionInternal>(
+            m_cacheImpl->getRegion(regionName))) {
+      region->evict(percentage);
     }
   }
 }
+
 }  // namespace client
 }  // namespace geode
 }  // namespace apache
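
The core pattern adopted in this file — a std::deque fed by updateRegionHeapInfo() and drained by svc() under one mutex and condition variable — is the standard producer/consumer loop; the predicate passed to wait() re-checks the state on every wakeup, which is what makes spurious wakeups harmless. A self-contained sketch with hypothetical names:

    #include <condition_variable>
    #include <cstdint>
    #include <deque>
    #include <mutex>
    #include <thread>

    class HeapInfoPump {
     public:
      void start() {
        running_ = true;
        thread_ = std::thread([this] { svc(); });
      }

      void stop() {
        {
          // Flipping the flag under the mutex closes the window in which
          // the worker could check its predicate and then miss the notify.
          std::lock_guard<std::mutex> lock(mutex_);
          running_ = false;
        }
        condition_.notify_one();
        thread_.join();
      }

      void put(int64_t info) {
        {
          std::lock_guard<std::mutex> lock(mutex_);
          queue_.push_back(info);
        }
        condition_.notify_one();
      }

     private:
      void svc() {
        std::unique_lock<std::mutex> lock(mutex_);
        while (running_) {
          // wait() releases the mutex while blocked and re-checks the
          // predicate on every wakeup, spurious or not.
          condition_.wait(lock,
                          [this] { return !running_ || !queue_.empty(); });
          while (!queue_.empty()) {
            int64_t info = queue_.front();
            queue_.pop_front();
            process(info);  // still holds the lock; fine for cheap work
          }
        }
      }

      void process(int64_t /*info*/) { /* consume one reading */ }

      std::thread thread_;
      bool running_ = false;
      std::deque<int64_t> queue_;
      std::mutex mutex_;
      std::condition_variable condition_;
    };
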
diff --git a/cppcache/src/EvictionController.hpp b/cppcache/src/EvictionController.hpp
index ddc6cdc..9e7ae72 100644
--- a/cppcache/src/EvictionController.hpp
+++ b/cppcache/src/EvictionController.hpp
@@ -1,8 +1,3 @@
-#pragma once
-
-#ifndef GEODE_EVICTIONCONTROLLER_H_
-#define GEODE_EVICTIONCONTROLLER_H_
-
 /*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
@@ -20,18 +15,27 @@
  * limitations under the License.
  */
 
-#include <memory>
+#pragma once
+
+#ifndef GEODE_EVICTIONCONTROLLER_H_
+#define GEODE_EVICTIONCONTROLLER_H_
+
+#include <atomic>
+#include <condition_variable>
+#include <mutex>
 #include <string>
+#include <thread>
 #include <vector>
 
-#include <ace/RW_Thread_Mutex.h>
-#include <ace/Task.h>
-
-#include <geode/DataOutput.hpp>
+#include <boost/thread/shared_mutex.hpp>
 
 #include "EvictionThread.hpp"
-#include "IntQueue.hpp"
-#include "util/Log.hpp"
+
+namespace apache {
+namespace geode {
+namespace client {
+
+class CacheImpl;
 
 /**
  * This class ensures that the cache consumes only as much memory as
@@ -58,46 +62,22 @@
  * When a region is destroyed, it deregisters itself with the EvictionController
  * Format of object that is put into the region map (int size, int numEntries)
  */
-namespace apache {
-namespace geode {
-namespace client {
-
-typedef IntQueue<int64_t> HeapSizeInfoQueue;
-typedef std::vector<std::string> VectorOfString;
-
-class EvictionController;
-class EvictionThread;
-class CacheImpl;
-
-class APACHE_GEODE_EXPORT EvictionController : public ACE_Task_Base {
+class EvictionController {
  public:
   EvictionController(size_t maxHeapSize, int32_t heapSizeDelta,
                      CacheImpl* cache);
 
-  ~EvictionController();
-
-  inline void start() {
-    m_run = true;
-    evictionThreadPtr->start();
-    this->activate();
-    LOGFINE("Eviction Controller started");
-  }
+  inline ~EvictionController() noexcept = default;
 
-  inline void stop() {
-    m_run = false;
-    evictionThreadPtr->stop();
-    this->wait();
-    m_regions.clear();
-    m_queue.clear();
+  void start();
 
-    LOGFINE("Eviction controller stopped");
-  }
+  void stop();
 
-  int svc(void);
+  void svc(void);
 
   void updateRegionHeapInfo(int64_t info);
-  void registerRegion(std::string& name);
-  void deregisterRegion(std::string& name);
+  void registerRegion(const std::string& name);
+  void deregisterRegion(const std::string& name);
   void evict(int32_t percentage);
 
  private:
@@ -105,17 +85,21 @@ class APACHE_GEODE_EXPORT EvictionController : public ACE_Task_Base {
   void processHeapInfo(int64_t& readInfo, int64_t& pendingEvictions);
 
  private:
-  bool m_run;
+  std::thread m_thread;
+  std::atomic<bool> m_run;
   int64_t m_maxHeapSize;
   int64_t m_heapSizeDelta;
   CacheImpl* m_cacheImpl;
   int64_t m_currentHeapSize;
-  HeapSizeInfoQueue m_queue;
-  VectorOfString m_regions;
-  mutable ACE_RW_Thread_Mutex m_regionLock;
-  EvictionThread* evictionThreadPtr;
+  std::deque<int64_t> m_queue;
+  std::mutex m_queueMutex;
+  std::condition_variable m_queueCondition;
+  std::vector<std::string> m_regions;
+  boost::shared_mutex m_regionLock;
+  EvictionThread m_evictionThread;
   static const char* NC_EC_Thread;
 };
+
 }  // namespace client
 }  // namespace geode
 }  // namespace apache
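
deregisterRegion() in the .cpp above also swaps a hand-rolled index loop for the erase-remove idiom: remove_if compacts the surviving elements to the front and returns the new logical end, and erase() then trims the tail. In isolation:

    #include <algorithm>
    #include <string>
    #include <vector>

    void removeByName(std::vector<std::string>& regions,
                      const std::string& name) {
      // remove_if does not shrink the vector; it returns the new end.
      auto removedBegin = std::remove_if(
          regions.begin(), regions.end(),
          [&](const std::string& region) { return region == name; });
      regions.erase(removedBegin, regions.end());
    }
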
diff --git a/cppcache/src/EvictionThread.cpp b/cppcache/src/EvictionThread.cpp
index e28376e..fcdc9e7 100644
--- a/cppcache/src/EvictionThread.cpp
+++ b/cppcache/src/EvictionThread.cpp
@@ -21,35 +21,56 @@
 
 #include "DistributedSystemImpl.hpp"
 #include "EvictionController.hpp"
+#include "util/Log.hpp"
 
 namespace apache {
 namespace geode {
 namespace client {
 
 const char* EvictionThread::NC_Evic_Thread = "NC Evic Thread";
+
 EvictionThread::EvictionThread(EvictionController* parent)
-    : m_pParent(parent), m_run(false) {}
+    : m_run(false), m_pParent(parent) {}
+
+void EvictionThread::start() {
+  m_run = true;
+  m_thread = std::thread(&EvictionThread::svc, this);
+
+  LOGFINE("Eviction Thread started");
+}
 
-int EvictionThread::svc(void) {
+void EvictionThread::stop() {
+  m_run = false;
+  m_queueCondition.notify_one();
+  m_thread.join();
+
+  m_queue.clear();
+
+  LOGFINE("Eviction Thread stopped");
+}
+
+void EvictionThread::svc(void) {
   DistributedSystemImpl::setThreadName(NC_Evic_Thread);
+
   while (m_run) {
-    processEvictions();
-  }
-  auto size = m_queue.size();
-  for (decltype(size) i = 0; i < size; i++) {
-    processEvictions();
-  }
-  return 1;
-}
+    std::unique_lock<std::mutex> lock(m_queueMutex);
+    m_queueCondition.wait(lock, [this] { return !m_run || !m_queue.empty(); });
 
-void EvictionThread::processEvictions() {
-  auto percentageToEvict = m_queue.getFor(std::chrono::microseconds(1500));
-  if (percentageToEvict != 0) {
-    m_pParent->evict(percentageToEvict);
+    while (!m_queue.empty()) {
+      auto percentageToEvict = m_queue.front();
+      m_queue.pop_front();
+      if (0 != percentageToEvict) {
+        m_pParent->evict(percentageToEvict);
+      }
+    }
   }
 }
 
-void EvictionThread::putEvictionInfo(int32_t info) { m_queue.put(info); }
+void EvictionThread::putEvictionInfo(int32_t info) {
+  std::unique_lock<std::mutex> lock(m_queueMutex);
+  m_queue.push_back(info);
+  m_queueCondition.notify_one();
+}
 
 }  // namespace client
 }  // namespace geode
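
Across these classes m_run also changes from volatile bool to std::atomic<bool>. volatile suppresses some compiler caching but gives no inter-thread visibility or ordering guarantees in the C++ memory model; an atomic flag does. A compact illustration:

    #include <atomic>
    #include <thread>

    std::atomic<bool> run{true};

    void worker() {
      while (run) {  // atomic load (sequentially consistent by default)
        // ... one unit of work ...
      }
    }

    int main() {
      std::thread thread(worker);
      run = false;  // atomic store, guaranteed visible to the worker
      thread.join();
    }
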
diff --git a/cppcache/src/EvictionThread.hpp b/cppcache/src/EvictionThread.hpp
index 31ffebd..5a13282 100644
--- a/cppcache/src/EvictionThread.hpp
+++ b/cppcache/src/EvictionThread.hpp
@@ -20,12 +20,11 @@
 #ifndef GEODE_EVICTIONTHREAD_H_
 #define GEODE_EVICTIONTHREAD_H_
 
-#include <ace/Task.h>
-
-#include <geode/DataOutput.hpp>
-
-#include "IntQueue.hpp"
-#include "util/Log.hpp"
+#include <atomic>
+#include <condition_variable>
+#include <deque>
+#include <mutex>
+#include <thread>
 
 namespace apache {
 namespace geode {
@@ -36,31 +35,21 @@ class EvictionController;
 /**
  * This class does the actual evictions
  */
-class APACHE_GEODE_EXPORT EvictionThread : public ACE_Task_Base {
+class EvictionThread {
  public:
   explicit EvictionThread(EvictionController* parent);
-
-  inline void start() {
-    m_run = true;
-    this->activate();
-    LOGFINE("Eviction Thread started");
-  }
-
-  inline void stop() {
-    m_run = false;
-    this->wait();
-    m_queue.clear();
-    LOGFINE("Eviction Thread stopped");
-  }
-
-  int svc();
+  void start();
+  void stop();
+  void svc();
   void putEvictionInfo(int32_t info);
-  void processEvictions();
 
  private:
+  std::thread m_thread;
+  std::atomic<bool> m_run;
   EvictionController* m_pParent;
-  IntQueue<int32_t> m_queue;
-  bool m_run;
+  std::deque<int32_t> m_queue;
+  std::mutex m_queueMutex;
+  std::condition_variable m_queueCondition;
 
   static const char* NC_Evic_Thread;
 };
diff --git a/cppcache/src/ExecutionImpl.cpp b/cppcache/src/ExecutionImpl.cpp
index 516a5d5..6340b12 100644
--- a/cppcache/src/ExecutionImpl.cpp
+++ b/cppcache/src/ExecutionImpl.cpp
@@ -24,6 +24,7 @@
 #include <geode/internal/geode_globals.hpp>
 
 #include "NoResult.hpp"
+#include "TcrConnectionManager.hpp"
 #include "ThinClientPoolDM.hpp"
 #include "ThinClientRegion.hpp"
 #include "UserAttributes.hpp"
diff --git a/cppcache/src/IntQueue.hpp b/cppcache/src/IntQueue.hpp
deleted file mode 100644
index 667240e..0000000
--- a/cppcache/src/IntQueue.hpp
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-#ifndef GEODE_INTQUEUE_H_
-#define GEODE_INTQUEUE_H_
-
-#include <chrono>
-#include <condition_variable>
-#include <deque>
-#include <mutex>
-
-namespace apache {
-namespace geode {
-namespace client {
-
-template <class T>
-class APACHE_GEODE_EXPORT IntQueue {
- public:
-  inline IntQueue() = default;
-
-  inline ~IntQueue() noexcept = default;
-
-  T get() {
-    std::unique_lock<decltype(m_mutex)> _guard(m_mutex);
-    T mp = 0;
-    getInternal(mp);
-    return mp;
-  }
-
-  template <class _Rep, class _Period>
-  T getFor(const std::chrono::duration<_Rep, _Period>& duration) {
-    std::unique_lock<decltype(m_mutex)> _guard(m_mutex);
-    T mp = 0;
-    if (!getInternal(mp)) {
-      if (m_cond.wait_for(_guard, duration,
-                          [this] { return !m_queue.empty(); })) {
-        mp = m_queue.back();
-        m_queue.pop_back();
-      }
-    }
-    return mp;
-  }
-
-  void put(T mp) {
-    std::unique_lock<decltype(m_mutex)> _guard(m_mutex);
-    m_queue.push_front(mp);
-    if (m_queue.size() == 1) {
-      m_cond.notify_one();
-    }
-  }
-
-  size_t size() {
-    std::unique_lock<decltype(m_mutex)> _guard(m_mutex);
-    return m_queue.size();
-  }
-
-  void clear() {
-    std::unique_lock<decltype(m_mutex)> _guard(m_mutex);
-    m_queue.clear();
-  }
-
-  bool empty() {
-    std::unique_lock<decltype(m_mutex)> _guard(m_mutex);
-    return m_queue.empty();
-  }
-
- private:
-  inline bool getInternal(T& val) {
-    if (m_queue.size() > 0) {
-      val = m_queue.back();
-      m_queue.pop_back();
-      return true;
-    }
-    return false;
-  }
-
-  std::deque<T> m_queue;
-  std::mutex m_mutex;
-  std::condition_variable m_cond;
-};
-}  // namespace client
-}  // namespace geode
-}  // namespace apache
-
-#endif  // GEODE_INTQUEUE_H_
diff --git a/cppcache/src/InternalCacheTransactionManager2PCImpl.cpp b/cppcache/src/InternalCacheTransactionManager2PCImpl.cpp
index dd71ccf..3601d05 100644
--- a/cppcache/src/InternalCacheTransactionManager2PCImpl.cpp
+++ b/cppcache/src/InternalCacheTransactionManager2PCImpl.cpp
@@ -24,6 +24,7 @@
 #include "CacheRegionHelper.hpp"
 #include "CacheTransactionManagerImpl.hpp"
 #include "TXCleaner.hpp"
+#include "TcrConnectionManager.hpp"
 #include "TcrMessage.hpp"
 #include "ThinClientPoolDM.hpp"
 #include "util/exception.hpp"
diff --git a/cppcache/src/LRUEntriesMap.cpp b/cppcache/src/LRUEntriesMap.cpp
index e7248f4..394f5fe 100644
--- a/cppcache/src/LRUEntriesMap.cpp
+++ b/cppcache/src/LRUEntriesMap.cpp
@@ -20,6 +20,7 @@
 #include <mutex>
 
 #include "CacheImpl.hpp"
+#include "EvictionController.hpp"
 #include "ExpiryTaskManager.hpp"
 #include "LRUList.cpp"
 #include "MapSegment.hpp"
diff --git a/cppcache/src/LocalRegion.cpp b/cppcache/src/LocalRegion.cpp
index 8b37f64..08d5c10 100644
--- a/cppcache/src/LocalRegion.cpp
+++ b/cppcache/src/LocalRegion.cpp
@@ -33,6 +33,7 @@
 #include "RegionGlobalLocks.hpp"
 #include "SerializableHelper.hpp"
 #include "TXState.hpp"
+#include "TcrConnectionManager.hpp"
 #include "Utils.hpp"
 #include "VersionTag.hpp"
 #include "util/Log.hpp"
@@ -195,6 +196,16 @@ void LocalRegion::tombstoneOperationNoThrow(
     m_entries->reapTombstones(tombstoneKeys);
   }
 }
+
+std::shared_ptr<Region> LocalRegion::findSubRegion(const std::string& name) {
+  auto&& lock = m_subRegions.make_lock<std::lock_guard>();
+  const auto& find = m_subRegions.find(name);
+  if (find != m_subRegions.end()) {
+    return find->second;
+  }
+  return nullptr;
+}
+
 std::shared_ptr<Region> LocalRegion::getSubregion(const std::string& path) {
   CHECK_DESTROY_PENDING(TryReadGuard, LocalRegion::getSubregion);
 
@@ -203,25 +214,28 @@ std::shared_ptr<Region> LocalRegion::getSubregion(const std::string& path) {
     LOGERROR("Get subregion path [" + path + "] is not valid.");
     throw IllegalArgumentException("Get subegion path is empty or a /");
   }
+
   auto fullname = path;
   if (fullname.substr(0, 1) == slash) {
     fullname = path.substr(1);
   }
+
   // find second separator
-  size_t idx = fullname.find('/');
+  auto idx = fullname.find('/');
   auto stepname = fullname.substr(0, idx);
 
-  std::shared_ptr<Region> region, rptr;
-  if (0 == m_subRegions.find(stepname, region)) {
+  auto region = findSubRegion(stepname);
+  if (region) {
     if (stepname == fullname) {
       // done...
-      rptr = region;
+      return region;
     } else {
       std::string remainder = fullname.substr(stepname.length() + 1);
-      rptr = region->getSubregion(remainder.c_str());
+      return region->getSubregion(remainder);
     }
   }
-  return rptr;
+
+  return nullptr;
 }
 
 std::shared_ptr<Region> LocalRegion::createSubregion(
@@ -235,9 +249,9 @@ std::shared_ptr<Region> LocalRegion::createSubregion(
     }
   }
 
-  MapOfRegionGuard guard1(m_subRegions.mutex());
+  auto&& lock = m_subRegions.make_lock();
   std::shared_ptr<Region> region_ptr;
-  if (0 == m_subRegions.find(subregionName, region_ptr)) {
+  if (m_subRegions.find(subregionName) != m_subRegions.end()) {
     throw RegionExistsException(
         "LocalRegion::createSubregion: named region exists in the region");
   }
@@ -265,7 +279,7 @@ std::shared_ptr<Region> LocalRegion::createSubregion(
   }
 
   rPtr->acquireReadLock();
-  m_subRegions.bind(rPtr->getName(), std::shared_ptr<Region>(rPtr));
+  m_subRegions.emplace(rPtr->getName(), rPtr);
 
   // schedule the sub region expiry if regionExpiry enabled.
   rPtr->setRegionExpiryTask();
@@ -276,7 +290,7 @@ std::shared_ptr<Region> LocalRegion::createSubregion(
 std::vector<std::shared_ptr<Region>> LocalRegion::subregions(
     const bool recursive) {
   CHECK_DESTROY_PENDING(TryReadGuard, LocalRegion::subregions);
-  if (m_subRegions.current_size() == 0) {
+  if (m_subRegions.empty()) {
     return std::vector<std::shared_ptr<Region>>();
   }
 
@@ -789,16 +803,16 @@ bool LocalRegion::containsKey_internal(
 
 std::vector<std::shared_ptr<Region>> LocalRegion::subregions_internal(
     const bool recursive) {
-  MapOfRegionGuard guard(m_subRegions.mutex());
+  auto&& lock = m_subRegions.make_lock();
 
   std::vector<std::shared_ptr<Region>> regions;
-  regions.reserve(m_subRegions.current_size());
+  regions.reserve(m_subRegions.size());
 
-  for (const auto& entry : m_subRegions) {
-    const auto& subRegion = entry.int_id_;
+  for (const auto& kv : m_subRegions) {
+    const auto& subRegion = kv.second;
     regions.push_back(subRegion);
 
-    if (recursive == true) {
+    if (recursive) {
       if (auto localRegion =
               std::dynamic_pointer_cast<LocalRegion>(subRegion)) {
         auto subRegions = localRegion->subregions_internal(true);
@@ -2246,6 +2260,23 @@ GfErrType LocalRegion::invalidateLocal(
   return err;
 }
 
+GfErrType LocalRegion::invalidateRegionNoThrowOnSubRegions(
+    const std::shared_ptr<Serializable>& aCallbackArgument,
+    const CacheEventFlags eventFlags) {
+  auto&& lock = m_subRegions.make_lock();
+  for (const auto& kv : m_subRegions) {
+    if (auto subRegion = std::dynamic_pointer_cast<RegionInternal>(kv.second)) {
+      auto err =
+          subRegion->invalidateRegionNoThrow(aCallbackArgument, eventFlags);
+      if (err != GF_NOERR) {
+        return err;
+      }
+    }
+  }
+
+  return GF_NOERR;
+}
+
 GfErrType LocalRegion::invalidateRegionNoThrow(
     const std::shared_ptr<Serializable>& aCallbackArgument,
     const CacheEventFlags eventFlags) {
@@ -2254,9 +2285,9 @@ GfErrType LocalRegion::invalidateRegionNoThrow(
 
   if (m_regionAttributes.getCachingEnabled()) {
     std::vector<std::shared_ptr<CacheableKey>> v = keys_internal();
-    const auto size = v.size();
+    auto size = v.size();
     std::shared_ptr<MapEntryImpl> me;
-    for (size_t i = 0; i < size; i++) {
+    for (decltype(size) i = 0; i < size; i++) {
       {
         std::shared_ptr<Cacheable> oldValue;
         // invalidate all the entries with a nullptr versionTag
@@ -2278,20 +2309,11 @@ GfErrType LocalRegion::invalidateRegionNoThrow(
     if (err != GF_NOERR) return err;
   }
 
-  if (m_subRegions.current_size() > 0) {
-    ACE_Guard<ACE_Recursive_Thread_Mutex> subguard(m_subRegions.mutex());
-    for (MapOfRegionWithLock::iterator p = m_subRegions.begin();
-         p != m_subRegions.end(); ++p) {
-      RegionInternal* subRegion =
-          dynamic_cast<RegionInternal*>((*p).int_id_.get());
-      if (subRegion != nullptr) {
-        err = subRegion->invalidateRegionNoThrow(aCallbackArgument, eventFlags);
-        if (err != GF_NOERR) {
-          return err;
-        }
-      }
-    }
+  err = invalidateRegionNoThrowOnSubRegions(aCallbackArgument, eventFlags);
+  if (err != GF_NOERR) {
+    return err;
   }
+
   err = invokeCacheListenerForRegionEvent(aCallbackArgument, eventFlags,
                                           AFTER_REGION_INVALIDATE);
 
@@ -2357,15 +2379,14 @@ GfErrType LocalRegion::destroyRegionNoThrow(
 
   LOGFINE("Region %s is being destroyed", m_fullPath.c_str());
   {
-    MapOfRegionGuard guard(m_subRegions.mutex());
-    for (MapOfRegionWithLock::iterator p = m_subRegions.begin();
-         p != m_subRegions.end(); ++p) {
+    auto&& lock = m_subRegions.make_lock();
+
+    for (const auto& kv : m_subRegions) {
       // TODO: remove unnecessary dynamic_cast by having m_subRegions hold
       // RegionInternal and invoke the destroy method in that
-      RegionInternal* subRegion =
-          dynamic_cast<RegionInternal*>((*p).int_id_.get());
-      if (subRegion != nullptr) {
-        //  for subregions never remove from parent since that will cause
+      if (auto subRegion =
+              std::dynamic_pointer_cast<RegionInternal>(kv.second)) {
+        // for subregions never remove from parent since that will cause
         // the region to be destroyed and SEGV; clear() takes care of that
         // Also don't send remote destroy message for sub-regions
         err = subRegion->destroyRegionNoThrow(
@@ -2378,7 +2399,7 @@ GfErrType LocalRegion::destroyRegionNoThrow(
       }
     }
   }
-  m_subRegions.unbind_all();
+  m_subRegions.clear();
 
   //  for the expiry case try the local destroy first and remote
   // destroy only if local destroy succeeds
@@ -2505,20 +2526,17 @@ void LocalRegion::entries_internal(
   m_entries->getEntries(me);
 
   if (recursive == true) {
-    MapOfRegionGuard guard(m_subRegions.mutex());
-    for (MapOfRegionWithLock::iterator p = m_subRegions.begin();
-         p != m_subRegions.end(); ++p) {
-      dynamic_cast<LocalRegion*>((*p).int_id_.get())
-          ->entries_internal(me, true);
+    auto&& lock = m_subRegions.make_lock();
+    for (const auto& kv : m_subRegions) {
+      if (auto subRegion = std::dynamic_pointer_cast<LocalRegion>(kv.second)) {
+        subRegion->entries_internal(me, true);
+      }
     }
   }
 }
 
-int LocalRegion::removeRegion(const std::string& name) {
-  if (m_subRegions.current_size() == 0) {
-    return 0;
-  }
-  return m_subRegions.unbind(name);
+void LocalRegion::removeRegion(const std::string& name) {
+  m_subRegions.erase(name);
 }
 
 bool LocalRegion::invokeCacheWriterForEntryEvent(
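
A small idiom repeated throughout the LocalRegion changes: std::dynamic_pointer_cast returns an empty shared_ptr when the downcast fails, so the cast and the null check fold into a single if statement. Reduced to essentials, with stand-in types:

    #include <memory>

    struct Region { virtual ~Region() = default; };
    struct RegionImpl : Region {
      void evict() { /* ... */ }
    };

    void tryEvict(const std::shared_ptr<Region>& region) {
      // Empty pointer on a failed cast makes the if guard the call.
      if (auto impl = std::dynamic_pointer_cast<RegionImpl>(region)) {
        impl->evict();
      }
    }
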
diff --git a/cppcache/src/LocalRegion.hpp b/cppcache/src/LocalRegion.hpp
index 1c067d3..194d0d5 100644
--- a/cppcache/src/LocalRegion.hpp
+++ b/cppcache/src/LocalRegion.hpp
@@ -1,8 +1,3 @@
-#pragma once
-
-#ifndef GEODE_LOCALREGION_H_
-#define GEODE_LOCALREGION_H_
-
 /*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
@@ -20,9 +15,10 @@
  * limitations under the License.
  */
 
-/**
- * @file
- */
+#pragma once
+
+#ifndef GEODE_LOCALREGION_H_
+#define GEODE_LOCALREGION_H_
 
 #include <string>
 #include <unordered_map>
@@ -49,12 +45,12 @@
 #include "EntriesMapFactory.hpp"
 #include "EventType.hpp"
 #include "ExpMapEntry.hpp"
-#include "MapWithLock.hpp"
 #include "RegionInternal.hpp"
 #include "RegionStats.hpp"
 #include "SerializationRegistry.hpp"
 #include "TSSTXStateWrapper.hpp"
 #include "TombstoneList.hpp"
+#include "util/synchronized_map.hpp"
 
 namespace apache {
 namespace geode {
@@ -508,7 +504,9 @@ class APACHE_GEODE_EXPORT LocalRegion : public RegionInternal {
   /* protected attributes */
   std::string m_name;
   std::shared_ptr<Region> m_parentRegion;
-  MapOfRegionWithLock m_subRegions;
+  synchronized_map<std::unordered_map<std::string, std::shared_ptr<Region>>,
+                   std::recursive_mutex>
+      m_subRegions;
   std::string m_fullPath;
   volatile bool m_destroyPending;
   std::shared_ptr<CacheListener> m_listener;
@@ -527,7 +525,7 @@ class APACHE_GEODE_EXPORT LocalRegion : public RegionInternal {
   mutable ACE_RW_Thread_Mutex m_rwLock;
   std::vector<std::shared_ptr<CacheableKey>> keys_internal();
   bool containsKey_internal(const std::shared_ptr<CacheableKey>& keyPtr) const;
-  int removeRegion(const std::string& name);
+  void removeRegion(const std::string& name);
 
   bool invokeCacheWriterForEntryEvent(
       const std::shared_ptr<CacheableKey>& key,
@@ -575,6 +573,12 @@ class APACHE_GEODE_EXPORT LocalRegion : public RegionInternal {
       std::shared_ptr<EventId> eventId, std::shared_ptr<Cacheable>& fullObject,
       std::shared_ptr<VersionTag>& versionTag);
 
+ private:
+  std::shared_ptr<Region> findSubRegion(const std::string& name);
+  GfErrType invalidateRegionNoThrowOnSubRegions(
+      const std::shared_ptr<Serializable>& aCallbackArgument,
+      const CacheEventFlags eventFlags);
+
   // these classes encapsulate actions specific to update operations
   // used by the template <code>updateNoThrow</code> class
   friend class PutActions;
@@ -584,6 +588,7 @@ class APACHE_GEODE_EXPORT LocalRegion : public RegionInternal {
   friend class RemoveActions;
   friend class InvalidateActions;
 };
+
 }  // namespace client
 }  // namespace geode
 }  // namespace apache
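
Why std::recursive_mutex for m_subRegions: createSubregion() above holds make_lock() across its find/emplace sequence, and if single operations self-lock — as the unguarded emplace() in CqService::addCq suggests — the same thread acquires the map's mutex a second time inside that scope. Stripped of the map, the requirement looks like this:

    #include <mutex>

    std::recursive_mutex m;

    void emplaceLike() {
      // Inner acquisition, as a self-locking map operation would do.
      std::lock_guard<std::recursive_mutex> lock(m);
    }

    void createSubregionLike() {
      std::lock_guard<std::recursive_mutex> lock(m);  // outer make_lock()
      emplaceLike();  // re-entry: deadlocks with std::mutex, fine here
    }

    int main() { createSubregionLike(); }
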
diff --git a/cppcache/src/MapSegment.cpp b/cppcache/src/MapSegment.cpp
index 13e1e5a..37d3a7b 100644
--- a/cppcache/src/MapSegment.cpp
+++ b/cppcache/src/MapSegment.cpp
@@ -47,7 +47,7 @@ void MapSegment::open(RegionInternal* region, const EntryFactory* entryFactory,
   uint32_t mapSize = TableOfPrimes::nextLargerPrime(size, m_primeIndex);
   LOGFINER("Initializing MapSegment with size %d (given size %d).", mapSize,
            size);
-  m_map->open(mapSize);
+  m_map->reserve(mapSize);
   m_entryFactory = entryFactory;
   m_region = region;
   m_tombstoneList =
@@ -57,11 +57,11 @@ void MapSegment::open(RegionInternal* region, const EntryFactory* entryFactory,
   m_concurrencyChecksEnabled = concurrencyChecksEnabled;
 }
 
-void MapSegment::close() { m_map->close(); }
+void MapSegment::close() {}
 
 void MapSegment::clear() {
   std::lock_guard<spinlock_mutex> lk(m_spinlock);
-  m_map->unbind_all();
+  m_map->clear();
 }
 
 void MapSegment::lock() { m_segmentMutex.lock(); }
@@ -80,17 +80,19 @@ GfErrType MapSegment::create(const std::shared_ptr<CacheableKey>& key,
   {
     std::lock_guard<spinlock_mutex> lk(m_spinlock);
     // if size is greater than 75 percent of prime, rehash
-    uint32_t mapSize = TableOfPrimes::getPrime(m_primeIndex);
-    if (((m_map->current_size() * 75) / 100) > mapSize) {
+    auto mapSize = TableOfPrimes::getPrime(m_primeIndex);
+    if (((m_map->size() * 75) / 100) > mapSize) {
       rehash();
     }
-    std::shared_ptr<MapEntry> entry;
-    if ((m_map->find(key, entry)) == -1) {
+
+    const auto& find = m_map->find(key);
+    if (find == m_map->end()) {
       if ((err = putNoEntry(key, newValue, me, updateCount, destroyTracker,
                             versionTag)) != GF_NOERR) {
         return err;
       }
     } else {
+      auto& entry = find->second;
       auto entryImpl = entry->getImplPtr();
       entryImpl->getValueI(oldValue);
       if (oldValue == nullptr || CacheableToken::isTombstone(oldValue)) {
@@ -149,11 +151,12 @@ GfErrType MapSegment::put(const std::shared_ptr<CacheableKey>& key,
     std::lock_guard<spinlock_mutex> lk(m_spinlock);
     // if size is greater than 75 percent of prime, rehash
     uint32_t mapSize = TableOfPrimes::getPrime(m_primeIndex);
-    if (((m_map->current_size() * 75) / 100) > mapSize) {
+    if (((m_map->size() * 75) / 100) > mapSize) {
       rehash();
     }
-    std::shared_ptr<MapEntry> entry;
-    if ((m_map->find(key, entry)) == -1) {
+
+    const auto& find = m_map->find(key);
+    if (find == m_map->end()) {
       if (delta != nullptr) {
         return GF_INVALID_DELTA;  // You cannot apply delta when there is no existing entry
       }
@@ -162,6 +165,7 @@ GfErrType MapSegment::put(const std::shared_ptr<CacheableKey>& key,
       err = putNoEntry(key, newValue, me, updateCount, destroyTracker,
                        versionTag);
     } else {
+      auto& entry = find->second;
       auto entryImpl = entry->getImplPtr();
       std::shared_ptr<Cacheable> meOldValue;
       entryImpl->getValueI(meOldValue);
@@ -212,8 +216,10 @@ GfErrType MapSegment::invalidate(const std::shared_ptr<CacheableKey>& key,
   std::lock_guard<spinlock_mutex> lk(m_spinlock);
   isTokenAdded = false;
   GfErrType err = GF_NOERR;
-  std::shared_ptr<MapEntry> entry;
-  if ((m_map->find(key, entry)) != -1) {
+
+  const auto& find = m_map->find(key);
+  if (find != m_map->end()) {
+    auto entry = find->second;
     VersionStamp versionStamp;
     if (m_concurrencyChecksEnabled) {
       versionStamp = entry->getVersionStamp();
@@ -258,10 +264,11 @@ GfErrType MapSegment::removeWhenConcurrencyEnabled(
     bool& isEntryFound, ExpiryTaskManager::id_type expiryTaskID,
     TombstoneExpiryHandler* handler, bool& expTaskSet) {
   GfErrType err = GF_NOERR;
-  std::shared_ptr<MapEntry> entry;
   VersionStamp versionStamp;
   // If entry found, else return no entry
-  if ((m_map->find(key, entry)) != -1) {
+  const auto& find = m_map->find(key);
+  if (find != m_map->end()) {
+    auto entry = find->second;
     isEntryFound = true;
     // If the version tag is null, use the version tag of
     // the existing entry
@@ -346,8 +353,7 @@ GfErrType MapSegment::remove(const std::shared_ptr<CacheableKey>& key,
   }
 
   std::lock_guard<spinlock_mutex> lk(m_spinlock);
-  std::shared_ptr<Cacheable> value;
-  if ((m_map->unbind(key, entry)) == -1) {
+  if (m_map->erase(key) == 0) {
     // didn't unbind, probably no entry...
     oldValue = nullptr;
     volatile int destroyTrackers = *m_numDestroyTrackers;
@@ -372,9 +378,8 @@ GfErrType MapSegment::remove(const std::shared_ptr<CacheableKey>& key,
 
 bool MapSegment::unguardedRemoveActualEntry(
     const std::shared_ptr<CacheableKey>& key, bool cancelTask) {
-  std::shared_ptr<MapEntry> entry;
   m_tombstoneList->eraseEntryFromTombstoneList(key, cancelTask);
-  if (m_map->unbind(key, entry) == -1) {
+  if (m_map->erase(key) == 0) {
     return false;
   }
   return true;
@@ -386,7 +391,7 @@ bool MapSegment::unguardedRemoveActualEntryWithoutCancelTask(
   std::shared_ptr<MapEntry> entry;
   taskid = m_tombstoneList->eraseEntryFromTombstoneListWithoutCancelTask(
       key, handler);
-  if (m_map->unbind(key, entry) == -1) {
+  if (m_map->erase(key) == 0) {
     return false;
   }
   return true;
@@ -404,12 +409,14 @@ bool MapSegment::getEntry(const std::shared_ptr<CacheableKey>& key,
                           std::shared_ptr<MapEntryImpl>& result,
                           std::shared_ptr<Cacheable>& value) {
   std::lock_guard<spinlock_mutex> lk(m_spinlock);
-  std::shared_ptr<MapEntry> entry;
-  if ((m_map->find(key, entry)) == -1) {
+
+  const auto& find = m_map->find(key);
+  if (find == m_map->end()) {
     result = nullptr;
     value = nullptr;
     return false;
   }
+  auto entry = find->second;
 
   // If the value is a tombstone return not found
   auto mePtr = entry->getImplPtr();
@@ -428,10 +435,13 @@ bool MapSegment::getEntry(const std::shared_ptr<CacheableKey>& key,
  */
 bool MapSegment::containsKey(const std::shared_ptr<CacheableKey>& key) {
   std::lock_guard<spinlock_mutex> lk(m_spinlock);
-  std::shared_ptr<MapEntry> mePtr;
-  if ((m_map->find(key, mePtr)) == -1) {
+
+  const auto& find = m_map->find(key);
+  if (find == m_map->end()) {
     return false;
   }
+  auto mePtr = find->second;
+
   // If the value is a tombstone return not found
   std::shared_ptr<Cacheable> value;
   auto mePtr1 = mePtr->getImplPtr();
@@ -446,12 +456,12 @@ bool MapSegment::containsKey(const std::shared_ptr<CacheableKey>& key) {
  */
 void MapSegment::getKeys(std::vector<std::shared_ptr<CacheableKey>>& result) {
   std::lock_guard<spinlock_mutex> lk(m_spinlock);
-  for (CacheableKeyHashMap::iterator iter = m_map->begin();
-       iter != m_map->end(); iter++) {
+
+  for (const auto& kv : *m_map) {
     std::shared_ptr<Cacheable> valuePtr;
-    (*iter).int_id_->getImplPtr()->getValueI(valuePtr);
+    kv.second->getImplPtr()->getValueI(valuePtr);
     if (!CacheableToken::isTombstone(valuePtr)) {
-      result.push_back((*iter).ext_id_);
+      result.push_back(kv.first);
     }
   }
 }
@@ -461,13 +471,13 @@ void MapSegment::getKeys(std::vector<std::shared_ptr<CacheableKey>>& result) {
  */
 void MapSegment::getEntries(std::vector<std::shared_ptr<RegionEntry>>& result) {
   std::lock_guard<spinlock_mutex> lk(m_spinlock);
-  for (CacheableKeyHashMap::iterator iter = m_map->begin();
-       iter != m_map->end(); iter++) {
+
+  for (const auto& kv : *m_map) {
     std::shared_ptr<CacheableKey> keyPtr;
     std::shared_ptr<Cacheable> valuePtr;
-    auto me = ((*iter).int_id_)->getImplPtr();
+    auto me = kv.second->getImplPtr();
     me->getValueI(valuePtr);
-    if (valuePtr != nullptr && !CacheableToken::isTombstone(valuePtr)) {
+    if (valuePtr && !CacheableToken::isTombstone(valuePtr)) {
       if (CacheableToken::isInvalid(valuePtr)) {
         valuePtr = nullptr;
       }
@@ -483,17 +493,17 @@ void MapSegment::getEntries(std::vector<std::shared_ptr<RegionEntry>>& result) {
  */
 void MapSegment::getValues(std::vector<std::shared_ptr<Cacheable>>& result) {
   std::lock_guard<spinlock_mutex> lk(m_spinlock);
-  for (auto&& it : *m_map) {
-    auto&& entry = it.int_id_;
+  for (const auto& kv : *m_map) {
+    auto& entry = kv.second;
     std::shared_ptr<Cacheable> value;
     entry->getValue(value);
-    auto&& entryImpl = entry->getImplPtr();
+    auto entryImpl = entry->getImplPtr();
 
     if (value && !CacheableToken::isInvalid(value) &&
         !CacheableToken::isDestroyed(value) &&
         !CacheableToken::isTombstone(value)) {
       if (CacheableToken::isOverflowed(value)) {  // get Value from disc.
-        auto&& key = it.ext_id_;
+        auto& key = kv.first;
         value = getFromDisc(key, entryImpl);
         entryImpl->setValueI(value);
       }
@@ -513,8 +523,8 @@ int MapSegment::addTrackerForEntry(const std::shared_ptr<CacheableKey>& key,
   std::lock_guard<spinlock_mutex> lk(m_spinlock);
   std::shared_ptr<MapEntry> entry;
   std::shared_ptr<MapEntry> newEntry;
-  int status;
-  if ((status = m_map->find(key, entry)) == -1) {
+  const auto& find = m_map->find(key);
+  if (find == m_map->end()) {
     oldValue = nullptr;
     if (addIfAbsent) {
       std::shared_ptr<MapEntryImpl> entryImpl;
@@ -528,6 +538,7 @@ int MapSegment::addTrackerForEntry(const std::shared_ptr<CacheableKey>& key,
       return -1;
     }
   } else {
+    entry = find->second;
     entry->getValue(oldValue);
     if (failIfPresent) {
       // return -1 without adding an entry; the callee should check on
@@ -542,11 +553,11 @@ int MapSegment::addTrackerForEntry(const std::shared_ptr<CacheableKey>& key,
   } else {
     updateCount = entry->addTracker(newEntry);
   }
-  if (newEntry != nullptr) {
-    if (status == -1) {
-      m_map->bind(key, newEntry);
+  if (newEntry) {
+    if (find == m_map->end()) {
+      m_map->emplace(key, newEntry);
     } else {
-      m_map->rebind(key, newEntry);
+      find->second = newEntry;
     }
   }
   return updateCount;
@@ -559,8 +570,10 @@ void MapSegment::removeTrackerForEntry(
     const std::shared_ptr<CacheableKey>& key) {
   if (m_concurrencyChecksEnabled) return;
   std::lock_guard<spinlock_mutex> lk(m_spinlock);
-  std::shared_ptr<MapEntry> entry;
-  if ((m_map->find(key, entry)) != -1) {
+
+  const auto& find = m_map->find(key);
+  if (find != m_map->end()) {
+    auto& entry = find->second;
     auto impl = entry->getImplPtr();
     removeTrackerForEntry(key, entry, impl);
   }
@@ -573,16 +586,16 @@ void MapSegment::addTrackerForAllEntries(
     MapOfUpdateCounters& updateCounterMap) {
   if (m_concurrencyChecksEnabled) return;
   std::lock_guard<spinlock_mutex> lk(m_spinlock);
+
   std::shared_ptr<MapEntry> newEntry;
   std::shared_ptr<CacheableKey> key;
-  for (CacheableKeyHashMap::iterator iter = m_map->begin();
-       iter != m_map->end(); ++iter) {
-    (*iter).int_id_->getKey(key);
-    int updateCount = (*iter).int_id_->addTracker(newEntry);
+  for (auto& kv : *m_map) {
+    kv.second->getKey(key);
+    int updateCount = kv.second->addTracker(newEntry);
     if (newEntry != nullptr) {
-      m_map->rebind(key, newEntry);
+      kv.second = newEntry;
     }
-    updateCounterMap.insert(std::make_pair(key, updateCount));
+    updateCounterMap.emplace(key, updateCount);
   }
 }
 
@@ -599,25 +612,11 @@ void MapSegment::removeDestroyTracking() {
  * @brief replace the existing hash map with one that is wider
  *   to reduce collision chains.
  */
-void MapSegment::rehash() {  // Only called from put, segment must already be
-                             // locked...
-
-  uint32_t newMapSize = TableOfPrimes::getPrime(++m_primeIndex);
+void MapSegment::rehash() {
+  // Only called from put, segment must already be locked...
+  auto newMapSize = TableOfPrimes::getPrime(++m_primeIndex);
   LOGFINER("Rehashing MapSegment to size %d.", newMapSize);
-  auto* newMap = new CacheableKeyHashMap();
-  newMap->open(newMapSize);
-
-  // copy all entries into newMap..
-  for (CacheableKeyHashMap::iterator iter = m_map->begin();
-       iter != m_map->end(); ++iter) {
-    newMap->bind((*iter).ext_id_, (*iter).int_id_);
-  }
-
-  // plug newMap into real member.
-  CacheableKeyHashMap* oldMap = m_map;
-  m_map = newMap;
-  // clean up the old map.
-  delete oldMap;
+  m_map->reserve(newMapSize);
   m_rehashCount++;
 }
 std::shared_ptr<Cacheable> MapSegment::getFromDisc(
@@ -731,12 +730,13 @@ GfErrType MapSegment::isTombstone(std::shared_ptr<CacheableKey> key,
                                   std::shared_ptr<MapEntryImpl>& me,
                                   bool& result) {
   std::shared_ptr<Cacheable> value;
-  std::shared_ptr<MapEntry> entry;
   std::shared_ptr<MapEntryImpl> mePtr;
-  if (m_map->find(key, entry) == -1) {
+  const auto& find = m_map->find(key);
+  if (find == m_map->end()) {
     result = false;
     return GF_NOERR;
   }
+  auto& entry = find->second;
   mePtr = entry->getImplPtr();
 
   if (!mePtr) {
@@ -753,8 +753,9 @@ GfErrType MapSegment::isTombstone(std::shared_ptr<CacheableKey> key,
   if (CacheableToken::isTombstone(value)) {
     if (m_tombstoneList->exists(key)) {
       std::shared_ptr<MapEntry> entry;
-      if (m_map->find(key, entry) != -1) {
-        auto mePtr = entry->getImplPtr();
+      const auto find = m_map->find(key);
+      if (find != m_map->end()) {
+        auto mePtr = find->second->getImplPtr();
         me = mePtr;
       }
       result = true;
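
The new CacheableKeyHashMap typedef (in the MapSegment.hpp hunk below) keys an unordered_map by shared_ptr but hashes and compares the pointees via dereference_hash / dereference_equal_to from geode/internal/functional.hpp; that is also why rehash() now only needs reserve(), since unordered_map manages its own buckets. A self-contained sketch of equivalent functors, using std::string in place of CacheableKey:

    #include <functional>
    #include <memory>
    #include <string>
    #include <unordered_map>

    template <typename Ptr>
    struct deref_hash {
      std::size_t operator()(const Ptr& ptr) const {
        return std::hash<typename Ptr::element_type>()(*ptr);
      }
    };

    template <typename Ptr>
    struct deref_equal_to {
      bool operator()(const Ptr& lhs, const Ptr& rhs) const {
        return *lhs == *rhs;
      }
    };

    using Key = std::shared_ptr<std::string>;
    using KeyMap =
        std::unordered_map<Key, int, deref_hash<Key>, deref_equal_to<Key>>;

    int main() {
      KeyMap map;
      map.emplace(std::make_shared<std::string>("alpha"), 1);
      // A distinct shared_ptr to an equal string still finds the entry,
      // because hashing and equality go through the pointee.
      return map.find(std::make_shared<std::string>("alpha")) != map.end() ? 0
                                                                           : 1;
    }
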
diff --git a/cppcache/src/MapSegment.hpp b/cppcache/src/MapSegment.hpp
index 4f6825c..3e00989 100644
--- a/cppcache/src/MapSegment.hpp
+++ b/cppcache/src/MapSegment.hpp
@@ -25,12 +25,6 @@
 #include <unordered_map>
 #include <vector>
 
-#include <ace/Functor_T.h>
-#include <ace/Hash_Map_Manager.h>
-#include <ace/Null_Mutex.h>
-#include <ace/Versioned_Namespace.h>
-#include <ace/config-lite.h>
-
 #include <geode/CacheableKey.hpp>
 #include <geode/Delta.hpp>
 #include <geode/RegionEntry.hpp>
@@ -42,42 +36,18 @@
 #include "TombstoneList.hpp"
 #include "util/concurrent/spinlock_mutex.hpp"
 
-namespace ACE_VERSIONED_NAMESPACE_NAME {
-
-template <>
-class ACE_Hash<std::shared_ptr<apache::geode::client::CacheableKey>> {
- public:
-  u_long operator()(
-      const std::shared_ptr<apache::geode::client::CacheableKey>& key) {
-    return key->hashcode();
-  }
-};
-
-template <>
-class ACE_Equal_To<std::shared_ptr<apache::geode::client::CacheableKey>> {
- public:
-  bool operator()(
-      const std::shared_ptr<apache::geode::client::CacheableKey>& key1,
-      const std::shared_ptr<apache::geode::client::CacheableKey>& key2) {
-    return key1->operator==(*key2);
-  }
-};
-
-// NOLINTNEXTLINE(google-readability-namespace-comments)
-}  // namespace ACE_VERSIONED_NAMESPACE_NAME
-
 namespace apache {
 namespace geode {
 namespace client {
 
 class RegionInternal;
-typedef ::ACE_Hash_Map_Manager_Ex<
-    std::shared_ptr<CacheableKey>, std::shared_ptr<MapEntry>,
-    ::ACE_Hash<std::shared_ptr<CacheableKey>>,
-    ::ACE_Equal_To<std::shared_ptr<CacheableKey>>, ::ACE_Null_Mutex>
+typedef std::unordered_map<std::shared_ptr<CacheableKey>,
+                           std::shared_ptr<MapEntry>,
+                           dereference_hash<std::shared_ptr<CacheableKey>>,
+                           dereference_equal_to<std::shared_ptr<CacheableKey>>>
     CacheableKeyHashMap;
 
-/** @brief type wrapper around the ACE map implementation. */
+/** @brief type wrapper around the std::unordered_map implementation. */
 class APACHE_GEODE_EXPORT MapSegment {
  private:
   // contain
@@ -115,7 +85,7 @@ class APACHE_GEODE_EXPORT MapSegment {
     std::shared_ptr<MapEntry> newEntry;
     entry->incrementUpdateCount(newEntry);
     if (newEntry != nullptr) {
-      m_map->rebind(key, newEntry);
+      (*m_map)[key] = newEntry;
       entry = newEntry;
       return true;
     }
@@ -139,13 +109,13 @@ class APACHE_GEODE_EXPORT MapSegment {
       entryImpl->getValueI(value);
       if (value == nullptr) {
         // get rid of an entry marked as destroyed
-        m_map->unbind(key);
+        m_map->erase(key);
         return;
       }
     }
     if (trackerPair.first) {
       entry = entryImpl ? entryImpl : entry->getImplPtr();
-      m_map->rebind(key, entry);
+      (*m_map)[key] = entry;
     }
   }
 
@@ -178,7 +148,7 @@ class APACHE_GEODE_EXPORT MapSegment {
         newEntry->getVersionStamp().setVersions(*versionStamp);
       }
     }
-    m_map->bind(key, newEntry);
+    m_map->emplace(key, newEntry);
     return GF_NOERR;
   }
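
[The CacheableKeyHashMap typedef above keys the map on shared_ptr but hashes and compares the pointees. The dereference_hash/dereference_equal_to helpers come from elsewhere in the geode-native tree and are not shown in this diff, so the following is a self-contained sketch of how such functors behave, under that assumption:]

    #include <cstddef>
    #include <functional>
    #include <memory>
    #include <string>
    #include <unordered_map>

    // Illustrative stand-ins; the real helpers may differ in detail.
    template <class Ptr>
    struct dereference_hash {
      std::size_t operator()(const Ptr& p) const {
        return std::hash<typename Ptr::element_type>{}(*p);
      }
    };

    template <class Ptr>
    struct dereference_equal_to {
      bool operator()(const Ptr& lhs, const Ptr& rhs) const {
        return *lhs == *rhs;  // compare pointees, not pointer identity
      }
    };

    int main() {
      using Key = std::shared_ptr<std::string>;
      std::unordered_map<Key, int, dereference_hash<Key>,
                         dereference_equal_to<Key>>
          map;
      map.emplace(std::make_shared<std::string>("k"), 1);
      // A distinct shared_ptr with an equal pointee still hits:
      return map.find(std::make_shared<std::string>("k")) != map.end() ? 0 : 1;
    }
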
 
diff --git a/cppcache/src/MapWithLock.hpp b/cppcache/src/MapWithLock.hpp
index 81ccdfd..58fab50 100644
--- a/cppcache/src/MapWithLock.hpp
+++ b/cppcache/src/MapWithLock.hpp
@@ -1,8 +1,3 @@
-#pragma once
-
-#ifndef GEODE_MAPWITHLOCK_H_
-#define GEODE_MAPWITHLOCK_H_
-
 /*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
@@ -20,11 +15,12 @@
  * limitations under the License.
  */
 
-#include <string>
-#include <unordered_map>
+#pragma once
 
-#include <ace/Hash_Map_Manager.h>
-#include <ace/Recursive_Thread_Mutex.h>
+#ifndef GEODE_MAPWITHLOCK_H_
+#define GEODE_MAPWITHLOCK_H_
+
+#include <unordered_map>
 
 #include <geode/CacheableKey.hpp>
 #include <geode/internal/geode_globals.hpp>
@@ -37,24 +33,6 @@ typedef std::unordered_map<std::shared_ptr<CacheableKey>, int,
                            CacheableKey::hash, CacheableKey::equal_to>
     MapOfUpdateCounters;
 
-class Region;
-/** Map type used to hold root regions in the Cache, and subRegions. */
-typedef ACE_Hash_Map_Manager_Ex<
-    std::string, std::shared_ptr<Region>, ACE_Hash<std::string>,
-    ACE_Equal_To<std::string>, ACE_Recursive_Thread_Mutex>
-    MapOfRegionWithLock;
-
-class CqQuery;
-typedef ACE_Hash_Map_Manager_Ex<
-    std::string, std::shared_ptr<CqQuery>, ACE_Hash<std::string>,
-    ACE_Equal_To<std::string>, ACE_Recursive_Thread_Mutex>
-    MapOfCqQueryWithLock;
-
-/** Guard type for locking a MapOfRegionWithLock while iterating or performing
- * other composite operations. ex.. MapOfRegionGuard guard( map->mutex() );
- */
-typedef ACE_Guard<ACE_Recursive_Thread_Mutex> MapOfRegionGuard;
-
 }  // namespace client
 }  // namespace geode
 }  // namespace apache
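
[The MapOfRegionGuard typedef deleted here paired ACE_Guard with the map's recursive mutex; the standard-library equivalent used throughout the rest of this commit is std::lock_guard over std::recursive_mutex. A one-screen sketch:]

    #include <mutex>

    int main() {
      std::recursive_mutex mutex;
      {
        // Replaces: ACE_Guard<ACE_Recursive_Thread_Mutex> guard(map.mutex());
        std::lock_guard<std::recursive_mutex> guard(mutex);
        // ... iterate or perform a composite operation under the lock ...
      }  // released on scope exit, exactly like ACE_Guard's destructor
      return 0;
    }
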
diff --git a/cppcache/src/NonCopyable.hpp b/cppcache/src/NonCopyable.hpp
index 3bb3063..7dd6cb5 100644
--- a/cppcache/src/NonCopyable.hpp
+++ b/cppcache/src/NonCopyable.hpp
@@ -31,8 +31,8 @@ class APACHE_GEODE_EXPORT NonCopyable {
   NonCopyable() {}
   ~NonCopyable() {}
 
- private:
-  NonCopyable(const NonCopyable&);
+ public:
+  NonCopyable(const NonCopyable&) = delete;
 };
 
 class APACHE_GEODE_EXPORT NonAssignable {
@@ -40,8 +40,8 @@ class APACHE_GEODE_EXPORT NonAssignable {
   NonAssignable() {}
   ~NonAssignable() {}
 
- private:
-  const NonAssignable& operator=(const NonAssignable&);
+ public:
+  const NonAssignable& operator=(const NonAssignable&) = delete;
 };
 
 }  // namespace client
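
[Marking the copy operations public and "= delete" (rather than private and undefined) turns misuse into an immediate, well-worded compiler error instead of a link-time failure, and also stops the class's own members from copying by accident. A generic illustration, not the Geode classes:]

    class NonCopyableExample {
     public:
      NonCopyableExample() = default;
      NonCopyableExample(const NonCopyableExample&) = delete;
      NonCopyableExample& operator=(const NonCopyableExample&) = delete;
    };

    int main() {
      NonCopyableExample a;
      // NonCopyableExample b = a;  // error: use of deleted function
      return 0;
    }
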
diff --git a/cppcache/src/Pool.cpp b/cppcache/src/Pool.cpp
index 4a02faf..129bbc6 100644
--- a/cppcache/src/Pool.cpp
+++ b/cppcache/src/Pool.cpp
@@ -21,12 +21,9 @@
 #include <geode/Pool.hpp>
 
 #include "PoolAttributes.hpp"
+#include "TcrConnectionManager.hpp"
 #include "ThinClientPoolHADM.hpp"
 
-/**
- * @file
- */
-
 namespace apache {
 namespace geode {
 namespace client {
diff --git a/cppcache/src/PoolFactory.cpp b/cppcache/src/PoolFactory.cpp
index c01582b..deaed13 100644
--- a/cppcache/src/PoolFactory.cpp
+++ b/cppcache/src/PoolFactory.cpp
@@ -25,6 +25,7 @@
 #include "CacheImpl.hpp"
 #include "CacheRegionHelper.hpp"
 #include "PoolAttributes.hpp"
+#include "TcrConnectionManager.hpp"
 #include "ThinClientPoolDM.hpp"
 #include "ThinClientPoolHADM.hpp"
 #include "ThinClientPoolStickyDM.hpp"
diff --git a/cppcache/src/PoolStatistics.hpp b/cppcache/src/PoolStatistics.hpp
index b12ed79..8b7e861 100644
--- a/cppcache/src/PoolStatistics.hpp
+++ b/cppcache/src/PoolStatistics.hpp
@@ -26,6 +26,7 @@
 #include "statistics/Statistics.hpp"
 #include "statistics/StatisticsFactory.hpp"
 #include "statistics/StatisticsManager.hpp"
+#include "util/concurrent/spinlock_mutex.hpp"
 
 namespace apache {
 namespace geode {
diff --git a/cppcache/src/RemoteQuery.cpp b/cppcache/src/RemoteQuery.cpp
index ae72a8a..fce5803 100644
--- a/cppcache/src/RemoteQuery.cpp
+++ b/cppcache/src/RemoteQuery.cpp
@@ -19,6 +19,7 @@
 
 #include "ResultSetImpl.hpp"
 #include "StructSetImpl.hpp"
+#include "TcrConnectionManager.hpp"
 #include "ThinClientPoolDM.hpp"
 #include "ThinClientRegion.hpp"
 #include "UserAttributes.hpp"
diff --git a/cppcache/src/RemoteQuery.hpp b/cppcache/src/RemoteQuery.hpp
index 71dda84..263d2e0 100644
--- a/cppcache/src/RemoteQuery.hpp
+++ b/cppcache/src/RemoteQuery.hpp
@@ -32,16 +32,13 @@
 #include <geode/internal/geode_globals.hpp>
 
 #include "CacheImpl.hpp"
-#include "ThinClientBaseDM.hpp"
-
-/**
- * @file
- */
 
 namespace apache {
 namespace geode {
 namespace client {
 
+class ThinClientBaseDM;
+
 class APACHE_GEODE_EXPORT RemoteQuery : public Query {
   std::string m_queryString;
   std::shared_ptr<RemoteQueryService> m_queryService;
diff --git a/cppcache/src/SerializationRegistry.cpp b/cppcache/src/SerializationRegistry.cpp
index 5040c9e..7918144 100644
--- a/cppcache/src/SerializationRegistry.cpp
+++ b/cppcache/src/SerializationRegistry.cpp
@@ -230,7 +230,8 @@ SerializationRegistry::deserializeDataSerializableFixedId(DataInput& input,
 
   TypeFactoryMethod createType = nullptr;
 
-  theTypeMap.findDataSerializableFixedId(fixedId, createType);
+  theTypeMap.findDataSerializableFixedId(static_cast<DSFid>(fixedId),
+                                         createType);
 
   if (createType == nullptr) {
     throw IllegalStateException("Unregistered type in deserialization");
@@ -285,11 +286,12 @@ void SerializationRegistry::addDataSerializableFixedIdType(
 }
 
 void SerializationRegistry::addDataSerializableFixedIdType(
-    int32_t id, TypeFactoryMethod func) {
+    internal::DSFid id, TypeFactoryMethod func) {
   theTypeMap.rebindDataSerializableFixedId(id, func);
 }
 
-void SerializationRegistry::removeDataSerializableFixeIdType(int32_t id) {
+void SerializationRegistry::removeDataSerializableFixeIdType(
+    internal::DSFid id) {
   theTypeMap.unbindDataSerializableFixedId(id);
 }
 
@@ -367,36 +369,45 @@ std::shared_ptr<Serializable> SerializationRegistry::GetEnum(
 void TheTypeMap::clear() {
   std::lock_guard<util::concurrent::spinlock_mutex> guard(
       m_dataSerializableMapLock);
-  m_dataSerializableMap->unbind_all();
+  m_dataSerializableMap.clear();
 
   std::lock_guard<util::concurrent::spinlock_mutex> guard2(
       m_dataSerializableFixedIdMapLock);
-  m_dataSerializableFixedIdMap->unbind_all();
+  m_dataSerializableFixedIdMap.clear();
 
   std::lock_guard<util::concurrent::spinlock_mutex> guard3(
       m_pdxSerializableMapLock);
-  m_pdxSerializableMap->unbind_all();
+  m_pdxSerializableMap.clear();
 }
 
 void TheTypeMap::findDataSerializable(int32_t id,
                                       TypeFactoryMethod& func) const {
   std::lock_guard<util::concurrent::spinlock_mutex> guard(
       m_dataSerializableMapLock);
-  m_dataSerializableMap->find(id, func);
+  const auto& found = m_dataSerializableMap.find(id);
+  if (found != m_dataSerializableMap.end()) {
+    func = found->second;
+  }
 }
 
-void TheTypeMap::findDataSerializableFixedId(int32_t id,
+void TheTypeMap::findDataSerializableFixedId(DSFid dsfid,
                                              TypeFactoryMethod& func) const {
   std::lock_guard<util::concurrent::spinlock_mutex> guard(
       m_dataSerializableFixedIdMapLock);
-  m_dataSerializableFixedIdMap->find(id, func);
+  const auto& found = m_dataSerializableFixedIdMap.find(dsfid);
+  if (found != m_dataSerializableFixedIdMap.end()) {
+    func = found->second;
+  }
 }
 
 void TheTypeMap::findDataSerializablePrimitive(DSCode dsCode,
                                                TypeFactoryMethod& func) const {
   std::lock_guard<util::concurrent::spinlock_mutex> guard(
       m_dataSerializablePrimitiveMapLock);
-  m_dataSerializablePrimitiveMap->find(dsCode, func);
+  const auto& found = m_dataSerializablePrimitiveMap.find(dsCode);
+  if (found != m_dataSerializablePrimitiveMap.end()) {
+    func = found->second;
+  }
 }
 
 void TheTypeMap::bindDataSerializable(TypeFactoryMethod func, int32_t id) {
@@ -412,49 +423,34 @@ void TheTypeMap::bindDataSerializable(TypeFactoryMethod func, int32_t id) {
 
   std::lock_guard<util::concurrent::spinlock_mutex> guard(
       m_dataSerializableMapLock);
-  int bindRes = m_dataSerializableMap->bind(id, func);
-  if (bindRes == 1) {
+  const auto& result = m_dataSerializableMap.emplace(id, func);
+  if (!result.second) {
     LOGERROR("A class with ID %d is already registered.", id);
     throw IllegalStateException("A class with given ID is already registered.");
-  } else if (bindRes == -1) {
-    LOGERROR("Unknown error while adding class ID %d to map.", id);
-    throw IllegalStateException("Unknown error while adding type to map.");
   }
 }
 
 void TheTypeMap::rebindDataSerializable(int32_t id, TypeFactoryMethod func) {
   std::lock_guard<util::concurrent::spinlock_mutex> guard(
       m_dataSerializableMapLock);
-  int bindRes = m_dataSerializableMap->rebind(id, func);
-  if (bindRes == -1) {
-    LOGERROR(
-        "Unknown error "
-        "while adding class ID %d to map.",
-        id);
-    throw IllegalStateException(
-        "Unknown error "
-        "while adding type to map.");
-  }
+  m_dataSerializableMap[id] = func;
 }
 
 void TheTypeMap::unbindDataSerializable(int32_t id) {
   std::lock_guard<util::concurrent::spinlock_mutex> guard(
       m_dataSerializableMapLock);
-  m_dataSerializableMap->unbind(id);
+  m_dataSerializableMap.erase(id);
 }
 
 void TheTypeMap::bindDataSerializablePrimitive(TypeFactoryMethod func,
                                                DSCode dsCode) {
   std::lock_guard<util::concurrent::spinlock_mutex> guard(
       m_dataSerializablePrimitiveMapLock);
-  int bindRes = m_dataSerializablePrimitiveMap->bind(dsCode, func);
-  if (bindRes == 1) {
+  const auto& result = m_dataSerializablePrimitiveMap.emplace(dsCode, func);
+  if (!result.second) {
     LOGERROR("A class with DSCode %d is already registered.", dsCode);
     throw IllegalStateException(
         "A class with given DSCode is already registered.");
-  } else if (bindRes == -1) {
-    LOGERROR("Unknown error while adding DSCode %d to map.", dsCode);
-    throw IllegalStateException("Unknown error while adding type to map.");
   }
 }
 
@@ -462,16 +458,16 @@ void TheTypeMap::rebindDataSerializablePrimitive(DSCode dsCode,
                                                  TypeFactoryMethod func) {
   std::lock_guard<util::concurrent::spinlock_mutex> guard(
       m_dataSerializablePrimitiveMapLock);
-  m_dataSerializablePrimitiveMap->rebind(dsCode, func);
+  m_dataSerializablePrimitiveMap[dsCode] = func;
 }
 
 void TheTypeMap::bindDataSerializableFixedId(TypeFactoryMethod func) {
   auto obj = func();
 
-  int32_t id = 0;
+  DSFid id;
   if (const auto dataSerializableFixedId =
           std::dynamic_pointer_cast<DataSerializableFixedId>(obj)) {
-    id = static_cast<int64_t>(dataSerializableFixedId->getDSFID());
+    id = dataSerializableFixedId->getDSFID();
   } else {
     throw UnsupportedOperationException(
         "TheTypeMap::bindDataSerializableInternal: Unknown serialization "
@@ -480,37 +476,25 @@ void TheTypeMap::bindDataSerializableFixedId(TypeFactoryMethod func) {
 
   std::lock_guard<util::concurrent::spinlock_mutex> guard(
       m_dataSerializableFixedIdMapLock);
-  int bindRes = m_dataSerializableFixedIdMap->bind(id, func);
-  if (bindRes == 1) {
-    LOGERROR(
-        "A fixed class with "
-        "ID %d is already registered.",
-        id);
+  const auto& result = m_dataSerializableFixedIdMap.emplace(id, func);
+  if (!result.second) {
+    LOGERROR("A fixed class with ID %d is already registered.", id);
     throw IllegalStateException(
-        "A fixed class with "
-        "given ID is already registered.");
-  } else if (bindRes == -1) {
-    LOGERROR(
-        "Unknown error "
-        "while adding class ID %d to map2.",
-        id);
-    throw IllegalStateException(
-        "Unknown error "
-        "while adding to map2.");
+        "A fixed class with given ID is already registered.");
   }
 }
 
-void TheTypeMap::rebindDataSerializableFixedId(int32_t id,
+void TheTypeMap::rebindDataSerializableFixedId(internal::DSFid id,
                                                TypeFactoryMethod func) {
   std::lock_guard<util::concurrent::spinlock_mutex> guard(
       m_dataSerializableFixedIdMapLock);
-  m_dataSerializableFixedIdMap->rebind(id, func);
+  m_dataSerializableFixedIdMap[id] = func;
 }
 
-void TheTypeMap::unbindDataSerializableFixedId(int32_t id) {
+void TheTypeMap::unbindDataSerializableFixedId(internal::DSFid id) {
   std::lock_guard<util::concurrent::spinlock_mutex> guard(
       m_dataSerializableFixedIdMapLock);
-  m_dataSerializableFixedIdMap->unbind(id);
+  m_dataSerializableFixedIdMap.erase(id);
 }
 
 void TheTypeMap::bindPdxSerializable(TypeFactoryMethodPdx func) {
@@ -519,17 +503,12 @@ void TheTypeMap::bindPdxSerializable(TypeFactoryMethodPdx func) {
       m_pdxSerializableMapLock);
   auto&& objFullName = obj->getClassName();
 
-  int bindRes = m_pdxSerializableMap->bind(objFullName, func);
-
-  if (bindRes == 1) {
+  const auto& result = m_pdxSerializableMap.emplace(objFullName, func);
+  if (!result.second) {
     LOGERROR("A object with FullName " + objFullName +
              " is already registered.");
     throw IllegalStateException(
         "A Object with given FullName is already registered.");
-  } else if (bindRes == -1) {
-    LOGERROR("Unknown error while adding Pdx Object named " + objFullName +
-             " to map.");
-    throw IllegalStateException("Unknown error while adding type to map.");
   }
 }
 
@@ -537,27 +516,23 @@ void TheTypeMap::findPdxSerializable(const std::string& objFullName,
                                      TypeFactoryMethodPdx& func) const {
   std::lock_guard<util::concurrent::spinlock_mutex> guard(
       m_pdxSerializableMapLock);
-  m_pdxSerializableMap->find(objFullName, func);
+  const auto& found = m_pdxSerializableMap.find(objFullName);
+  if (found != m_pdxSerializableMap.end()) {
+    func = found->second;
+  }
 }
 
 void TheTypeMap::rebindPdxSerializable(std::string objFullName,
                                        TypeFactoryMethodPdx func) {
   std::lock_guard<util::concurrent::spinlock_mutex> guard(
       m_pdxSerializableMapLock);
-  int bindRes = m_pdxSerializableMap->rebind(objFullName, func);
-  if (bindRes == -1) {
-    LOGERROR("Unknown error while adding Pdx Object FullName " + objFullName +
-             " to map.");
-    throw IllegalStateException(
-        "Unknown error "
-        "while adding type to map.");
-  }
+  m_pdxSerializableMap[objFullName] = func;
 }
 
 void TheTypeMap::unbindPdxSerializable(const std::string& objFullName) {
   std::lock_guard<util::concurrent::spinlock_mutex> guard(
       m_pdxSerializableMapLock);
-  m_pdxSerializableMap->unbind(objFullName);
+  m_pdxSerializableMap.erase(objFullName);
 }
 
 void PdxTypeHandler::serialize(
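
[The registration methods above collapse ACE bind()'s tri-state return (0 bound, 1 duplicate, -1 unknown error) into std::unordered_map::emplace, whose bool result has exactly one failure mode: the key already exists. A standalone sketch of the pattern, with illustrative names:]

    #include <stdexcept>
    #include <string>
    #include <unordered_map>

    using TypeFactory = int (*)();

    int main() {
      std::unordered_map<std::string, TypeFactory> registry;

      auto bindType = [&](const std::string& name, TypeFactory factory) {
        const auto& result = registry.emplace(name, factory);
        if (!result.second) {  // duplicate key is the only failure mode
          throw std::invalid_argument("already registered: " + name);
        }
      };

      bindType("MyType", [] { return 1; });
      // A second bindType("MyType", ...) would throw; there is no separate
      // "unknown error" branch to handle, unlike ACE bind()'s -1 return.
      return registry.at("MyType")() == 1 ? 0 : 1;
    }
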
diff --git a/cppcache/src/SerializationRegistry.hpp b/cppcache/src/SerializationRegistry.hpp
index 329cc62..bd46bcd 100644
--- a/cppcache/src/SerializationRegistry.hpp
+++ b/cppcache/src/SerializationRegistry.hpp
@@ -28,10 +28,6 @@
 #include <typeinfo>
 #include <unordered_map>
 
-#include <ace/Hash_Map_Manager.h>
-#include <ace/Null_Mutex.h>
-#include <ace/Thread_Mutex.h>
-
 #include <geode/DataOutput.hpp>
 #include <geode/DataSerializable.hpp>
 #include <geode/Delta.hpp>
@@ -48,33 +44,27 @@
 #include "config.h"
 #include "util/concurrent/spinlock_mutex.hpp"
 
-namespace ACE_VERSIONED_NAMESPACE_NAME {
+namespace std {
 
-#if defined(_MACOSX)
-// TODO CMake check type int64_t
 template <>
-class ACE_Export ACE_Hash<int64_t> {
- public:
-  // NOLINTNEXTLINE(google-runtime-int)
-  inline unsigned long operator()(int64_t t) const {
-    // NOLINTNEXTLINE(google-runtime-int)
-    return static_cast<unsigned long>(t);
+struct hash<apache::geode::client::internal::DSCode>
+    : public std::unary_function<apache::geode::client::internal::DSCode,
+                                 size_t> {
+  size_t operator()(apache::geode::client::internal::DSCode val) const {
+    return std::hash<int32_t>{}(static_cast<int32_t>(val));
   }
 };
 
-#endif
-
-using apache::geode::client::DSCode;
 template <>
-class ACE_Hash<DSCode> {
- public:
-  inline u_long operator()(const DSCode key) {
-    return static_cast<u_long>(key);
+struct hash<apache::geode::client::internal::DSFid>
+    : public std::unary_function<apache::geode::client::internal::DSFid,
+                                 size_t> {
+  size_t operator()(apache::geode::client::internal::DSFid val) const {
+    return std::hash<int32_t>{}(static_cast<int32_t>(val));
   }
 };
 
-// NOLINTNEXTLINE(google-readability-namespace-comments)
-}  // namespace ACE_VERSIONED_NAMESPACE_NAME
+}  // namespace std
 
 namespace apache {
 namespace geode {
@@ -83,21 +73,14 @@ namespace client {
 using internal::DataSerializableInternal;
 using internal::DataSerializablePrimitive;
 
-typedef ACE_Hash_Map_Manager<DSCode, TypeFactoryMethod, ACE_Null_Mutex>
-    DSCodeToFactoryMap;
-
-typedef ACE_Hash_Map_Manager<int32_t, TypeFactoryMethod, ACE_Null_Mutex>
-    IdToFactoryMap;
-
-typedef ACE_Hash_Map_Manager<std::string, TypeFactoryMethodPdx, ACE_Null_Mutex>
-    StrToPdxFactoryMap;
-
 class TheTypeMap : private NonCopyable {
  private:
-  DSCodeToFactoryMap* m_dataSerializablePrimitiveMap;
-  IdToFactoryMap* m_dataSerializableMap;
-  IdToFactoryMap* m_dataSerializableFixedIdMap;
-  StrToPdxFactoryMap* m_pdxSerializableMap;
+  std::unordered_map<internal::DSCode, TypeFactoryMethod>
+      m_dataSerializablePrimitiveMap;
+  std::unordered_map<int32_t, TypeFactoryMethod> m_dataSerializableMap;
+  std::unordered_map<internal::DSFid, TypeFactoryMethod>
+      m_dataSerializableFixedIdMap;
+  std::unordered_map<std::string, TypeFactoryMethodPdx> m_pdxSerializableMap;
   mutable util::concurrent::spinlock_mutex m_dataSerializablePrimitiveMapLock;
   mutable util::concurrent::spinlock_mutex m_dataSerializableMapLock;
   mutable util::concurrent::spinlock_mutex m_dataSerializableFixedIdMapLock;
@@ -107,35 +90,9 @@ class TheTypeMap : private NonCopyable {
   std::unordered_map<std::type_index, int32_t> typeToClassId;
 
  public:
-  TheTypeMap() {
-    // map to hold DataSerializablePrimitive
-    m_dataSerializablePrimitiveMap = new DSCodeToFactoryMap();
-
-    // map to hold Data Serializable IDs
-    m_dataSerializableMap = new IdToFactoryMap();
-
-    // map to hold internal Data Serializable Fixed IDs
-    m_dataSerializableFixedIdMap = new IdToFactoryMap();
+  TheTypeMap() { setup(); }
 
-    // map to hold PDX types <string, funptr>.
-    m_pdxSerializableMap = new StrToPdxFactoryMap();
-
-    setup();
-  }
-
-  virtual ~TheTypeMap() {
-    if (m_dataSerializableMap != nullptr) {
-      delete m_dataSerializableMap;
-    }
-
-    if (m_dataSerializableFixedIdMap != nullptr) {
-      delete m_dataSerializableFixedIdMap;
-    }
-
-    if (m_pdxSerializableMap != nullptr) {
-      delete m_pdxSerializableMap;
-    }
-  }
+  ~TheTypeMap() noexcept = default;
 
   void setup();
 
@@ -149,13 +106,15 @@ class TheTypeMap : private NonCopyable {
 
   void unbindDataSerializable(int32_t id);
 
-  void findDataSerializableFixedId(int32_t id, TypeFactoryMethod& func) const;
+  void findDataSerializableFixedId(internal::DSFid id,
+                                   TypeFactoryMethod& func) const;
 
   void bindDataSerializableFixedId(TypeFactoryMethod func);
 
-  void rebindDataSerializableFixedId(int32_t idd, TypeFactoryMethod func);
+  void rebindDataSerializableFixedId(internal::DSFid id,
+                                     TypeFactoryMethod func);
 
-  void unbindDataSerializableFixedId(int32_t id);
+  void unbindDataSerializableFixedId(internal::DSFid id);
 
   void bindPdxSerializable(TypeFactoryMethodPdx func);
 
@@ -287,9 +246,10 @@ class APACHE_GEODE_EXPORT SerializationRegistry {
 
   void addDataSerializableFixedIdType(TypeFactoryMethod func);
 
-  void addDataSerializableFixedIdType(int32_t id, TypeFactoryMethod func);
+  void addDataSerializableFixedIdType(internal::DSFid id,
+                                      TypeFactoryMethod func);
 
-  void removeDataSerializableFixeIdType(int32_t id);
+  void removeDataSerializableFixeIdType(internal::DSFid id);
 
   void setDataSerializablePrimitiveType(TypeFactoryMethod func, DSCode dsCode);
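
[The std::hash specializations added above let the scoped enums key an unordered_map by hashing their underlying integer. The std::unary_function base seen in the hunk is vestigial: it has been unnecessary since C++11 and was removed in C++17; only operator() matters. A compilable sketch with a hypothetical enum (the enumerator value is illustrative); note that on C++14-and-later standard libraries the specialization is technically redundant, since std::hash accepts enumeration types there:]

    #include <cstdint>
    #include <functional>
    #include <unordered_map>

    enum class DSFidExample : int32_t { FirstId = 1 };  // illustrative

    namespace std {
    template <>
    struct hash<DSFidExample> {
      size_t operator()(DSFidExample val) const {
        return std::hash<int32_t>{}(static_cast<int32_t>(val));
      }
    };
    }  // namespace std

    int main() {
      std::unordered_map<DSFidExample, const char*> names;
      names[DSFidExample::FirstId] = "FirstId";
      return names.size() == 1 ? 0 : 1;
    }
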
 
diff --git a/cppcache/src/Set.hpp b/cppcache/src/Set.hpp
deleted file mode 100644
index 89c791c..0000000
--- a/cppcache/src/Set.hpp
+++ /dev/null
@@ -1,132 +0,0 @@
-#pragma once
-
-#ifndef GEODE_SET_H_
-#define GEODE_SET_H_
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <unordered_set>
-
-#include <ace/Guard_T.h>
-#include <ace/Recursive_Thread_Mutex.h>
-
-#include "NonCopyable.hpp"
-
-namespace apache {
-namespace geode {
-namespace client {
-
-// A synchronized Set using std::unordered_set<T>
-
-/* adongre
- * CID 28616: Other violation (COPY_WITHOUT_ASSIGN)
- * Class "apache::geode::client::Set<unsigned short>::Iterator" has user-written
- * copyi
- * `constructor "apache::geode::client::Set<unsigned
- * short>::Iterator::Iterator(apache::geode::client::Set<unsigned
- * short>::Iterator const &)" i
- * but no corresponding user-written assignment operator.
- *
- * FIX : Make the class non copyable
- */
-template <typename T>
-class APACHE_GEODE_EXPORT Set : private NonAssignable {
- public:
-  // Iterator for a synchronized Set
-  class Iterator {
-   private:
-    Set<T>& m_set;
-    typename std::unordered_set<T>::const_iterator m_iter;
-
-    explicit Iterator(Set<T>& set) : m_set(set) {
-      m_set.m_mutex.acquire();
-      m_iter = set.m_set.begin();
-    }
-    // Never defined.
-    Iterator();
-
-   public:
-    Iterator(const Iterator& other) : m_set(other.m_set) {
-      m_set.m_mutex.acquire();
-      m_iter = other.m_iter;
-    }
-
-    inline const T& next() { return *(m_iter++); }
-
-    inline bool hasNext() const { return (m_iter != m_set.m_set.end()); }
-
-    ~Iterator() { m_set.m_mutex.release(); }
-
-    friend class Set;
-  };
-
-  Set() : m_set() {}
-
-  ~Set() {
-    ACE_Guard<ACE_Recursive_Thread_Mutex> _guard(m_mutex);
-
-    m_set.clear();
-  }
-
-  inline bool insert(const T& key) {
-    ACE_Guard<ACE_Recursive_Thread_Mutex> _guard(m_mutex);
-
-    return m_set.insert(key).second;
-  }
-
-  inline bool find(const T& key) const {
-    ACE_Guard<ACE_Recursive_Thread_Mutex> _guard(m_mutex);
-
-    return (m_set.find(key) != m_set.end());
-  }
-
-  inline bool erase(const T& key) {
-    ACE_Guard<ACE_Recursive_Thread_Mutex> _guard(m_mutex);
-
-    return (m_set.erase(key) > 0);
-  }
-
-  inline size_t size() {
-    ACE_Guard<ACE_Recursive_Thread_Mutex> _guard(m_mutex);
-
-    return m_set.size();
-  }
-
-  inline void clear() {
-    ACE_Guard<ACE_Recursive_Thread_Mutex> _guard(m_mutex);
-
-    m_set.clear();
-  }
-
-  inline bool empty() {
-    ACE_Guard<ACE_Recursive_Thread_Mutex> _guard(m_mutex);
-
-    return m_set.empty();
-  }
-
-  inline Iterator iterator() { return Iterator(*this); }
-
- private:
-  std::unordered_set<T> m_set;
-  ACE_Recursive_Thread_Mutex m_mutex;
-};
-}  // namespace client
-}  // namespace geode
-}  // namespace apache
-
-#endif  // GEODE_SET_H_
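
[Set.hpp's self-locking Iterator is gone; callers now take the container's lock explicitly via make_lock() and use an ordinary range-for (see the TcrConnection.cpp hunk below). The real wrapper lives in util/synchronized_set.hpp, which this diff does not show, so the following is a minimal sketch of the idea, not the shipped implementation:]

    #include <mutex>
    #include <unordered_set>

    template <class Set, class Mutex = std::mutex>
    class synchronized_set_sketch {
      Set set_;
      mutable Mutex mutex_;

     public:
      bool insert(const typename Set::value_type& value) {
        std::lock_guard<Mutex> lock(mutex_);
        return set_.insert(value).second;
      }

      // Hand the caller the lock for iteration/composite operations,
      // replacing the Iterator that locked in its constructor.
      std::unique_lock<Mutex> make_lock() const {
        return std::unique_lock<Mutex>(mutex_);
      }

      typename Set::iterator begin() { return set_.begin(); }
      typename Set::iterator end() { return set_.end(); }
    };

    int main() {
      synchronized_set_sketch<std::unordered_set<unsigned short>> ports;
      ports.insert(40404);

      auto&& lock = ports.make_lock();
      for (const auto& port : ports) {
        if (port == 0) return 1;
      }
      return 0;
    }
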
diff --git a/cppcache/src/Task.hpp b/cppcache/src/Task.hpp
index 30340ad..5612422 100644
--- a/cppcache/src/Task.hpp
+++ b/cppcache/src/Task.hpp
@@ -20,9 +20,9 @@
 #ifndef GEODE_Task_H_
 #define GEODE_Task_H_
 
+#include <atomic>
 #include <memory>
-
-#include <ace/Task.h>
+#include <thread>
 
 #include "AppDomainContext.hpp"
 #include "DistributedSystemImpl.hpp"
@@ -30,69 +30,63 @@
 namespace apache {
 namespace geode {
 namespace client {
-const char NC_thread[] = "NC thread";
+
+/**
+ * Re-implementation of the Task class based on std::thread rather than
+ * ACE_Task.
+ */
 template <class T>
-class APACHE_GEODE_EXPORT Task : public ACE_Task_Base {
+class Task {
  public:
-  /// Handle timeout events.
-  typedef int (T::*OPERATION)(volatile bool& isRunning);
-
-  // op_handler is the receiver of the timeout event. timeout is the method to
-  // be executed by op_handler_
-  Task(T* op_handler, OPERATION op)
-      : op_handler_(op_handler),
-        m_op(op),
-        m_run(false),
-        m_threadName(NC_thread),
-        m_appDomainContext(createAppDomainContext()) {}
-
-  // op_handler is the receiver of the timeout event. timeout is the method to
-  // be executed by op_handler_
-  Task(T* op_handler, OPERATION op, const char* tn)
-      : op_handler_(op_handler),
-        m_op(op),
-        m_run(false),
-        m_threadName(tn),
-        m_appDomainContext(createAppDomainContext()) {}
-
-  ~Task() {}
-
-  void start() {
-    m_run = true;
-    activate();
+  typedef void (T::*Method)(std::atomic<bool>& isRunning);
+
+  inline Task(T* target, Method method, const char* threadName)
+      : target_(target),
+        method_(method),
+        threadName_(threadName),
+        runnable_(false),
+        appDomainContext_(createAppDomainContext()) {}
+
+  inline ~Task() noexcept { stop(); }
+
+  inline void start() {
+    runnable_ = true;
+    thread_ = std::thread(&Task::svc, this);
   }
 
-  void stop() {
-    if (m_run) {
-      m_run = false;
-      wait();
-    }
+  inline void stop() noexcept {
+    stopNoblock();
+    wait();
   }
 
-  void stopNoblock() { m_run = false; }
+  inline void stopNoblock() noexcept { runnable_ = false; }
 
-  int svc(void) {
-    DistributedSystemImpl::setThreadName(m_threadName);
+  inline void wait() noexcept {
+    if (thread_.joinable()) {
+      thread_.join();
+    }
+  }
 
-    if (m_appDomainContext) {
-      int ret;
-      m_appDomainContext->run([this, &ret]() {
-        ret = (this->op_handler_->*this->m_op)(this->m_run);
-      });
-      return ret;
+  inline void svc(void) {
+    DistributedSystemImpl::setThreadName(threadName_);
+
+    if (appDomainContext_) {
+      appDomainContext_->run(
+          [this]() { (this->target_->*this->method_)(this->runnable_); });
     } else {
-      return (this->op_handler_->*m_op)(m_run);
+      (this->target_->*method_)(runnable_);
     }
   }
 
  private:
-  T* op_handler_;
-  /// Handle timeout events.
-  OPERATION m_op;
-  volatile bool m_run;
-  const char* m_threadName;
-  std::unique_ptr<AppDomainContext> m_appDomainContext;
+  std::thread thread_;
+  T* target_;
+  Method method_;
+  const char* threadName_;
+  std::atomic<bool> runnable_;
+  std::unique_ptr<AppDomainContext> appDomainContext_;
 };
+
 }  // namespace client
 }  // namespace geode
 }  // namespace apache
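
[For context, here is a worker driven the way the rewritten Task drives one: the loop polls the std::atomic<bool> that stopNoblock() clears, and wait() maps to join(). Shown with a bare std::thread so the sketch is self-contained; Worker and its method are hypothetical:]

    #include <atomic>
    #include <chrono>
    #include <functional>
    #include <thread>

    class Worker {
     public:
      void run(std::atomic<bool>& isRunning) {
        while (isRunning) {  // cleared by the equivalent of stopNoblock()
          std::this_thread::sleep_for(std::chrono::milliseconds(10));
        }
      }
    };

    int main() {
      // With the template above this would read:
      //   Task<Worker> task(&worker, &Worker::run, "NC worker");
      //   task.start(); ... task.stop();
      Worker worker;
      std::atomic<bool> isRunning{true};
      std::thread thread(&Worker::run, &worker, std::ref(isRunning));
      isRunning = false;  // stopNoblock()
      thread.join();      // wait()
      return 0;
    }
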
diff --git a/cppcache/src/TcrConnection.cpp b/cppcache/src/TcrConnection.cpp
index 15affd8..bdfa00e 100644
--- a/cppcache/src/TcrConnection.cpp
+++ b/cppcache/src/TcrConnection.cpp
@@ -50,7 +50,8 @@ const int64_t INITIAL_CONNECTION_ID = 26739;
     throw ex;                                         \
   }
 bool TcrConnection::InitTcrConnection(
-    TcrEndpoint* endpointObj, const char* endpoint, Set<uint16_t>& ports,
+    TcrEndpoint* endpointObj, const char* endpoint,
+    synchronized_set<std::unordered_set<uint16_t>>& ports,
     bool isClientNotification, bool isSecondary,
     std::chrono::microseconds connectTimeout) {
   m_conn = nullptr;
@@ -127,10 +128,10 @@ bool TcrConnection::InitTcrConnection(
     ports.insert(m_port);
   } else {
     // add the local ports to message
-    Set<uint16_t>::Iterator iter = ports.iterator();
+    auto&& lock = ports.make_lock();
     handShakeMsg.writeInt(static_cast<int32_t>(ports.size()));
-    while (iter.hasNext()) {
-      handShakeMsg.writeInt(static_cast<int32_t>(iter.next()));
+    for (const auto& port : ports) {
+      handShakeMsg.writeInt(static_cast<int32_t>(port));
     }
   }
 
diff --git a/cppcache/src/TcrConnection.hpp b/cppcache/src/TcrConnection.hpp
index dae6d2b..64d78e4 100644
--- a/cppcache/src/TcrConnection.hpp
+++ b/cppcache/src/TcrConnection.hpp
@@ -31,8 +31,8 @@
 
 #include "Connector.hpp"
 #include "DiffieHellman.hpp"
-#include "Set.hpp"
 #include "TcrMessage.hpp"
+#include "util/synchronized_set.hpp"
 
 #define DEFAULT_TIMEOUT_RETRIES 12
 #define PRIMARY_SERVER_TO_CLIENT 101
@@ -114,7 +114,8 @@ class APACHE_GEODE_EXPORT TcrConnection {
    * @param     numPorts  Size of ports list
    */
   bool InitTcrConnection(
-      TcrEndpoint* endpointObj, const char* endpoint, Set<uint16_t>& ports,
+      TcrEndpoint* endpointObj, const char* endpoint,
+      synchronized_set<std::unordered_set<uint16_t>>& ports,
       bool isClientNotification = false, bool isSecondary = false,
       std::chrono::microseconds connectTimeout = DEFAULT_CONNECT_TIMEOUT);
 
diff --git a/cppcache/src/TcrConnectionManager.cpp b/cppcache/src/TcrConnectionManager.cpp
index 35df517..cb6dcae 100644
--- a/cppcache/src/TcrConnectionManager.cpp
+++ b/cppcache/src/TcrConnectionManager.cpp
@@ -127,8 +127,9 @@ void TcrConnectionManager::init(bool isPool) {
       GfErrTypeToException("TcrConnectionManager::init", err);
     }
 
-    m_redundancyTask = new Task<TcrConnectionManager>(
-        this, &TcrConnectionManager::redundancy, NC_Redundancy);
+    m_redundancyTask = std::unique_ptr<Task<TcrConnectionManager>>(
+        new Task<TcrConnectionManager>(this, &TcrConnectionManager::redundancy,
+                                       NC_Redundancy));
     m_redundancyTask->start();
 
     m_redundancyManager->m_HAenabled = true;
@@ -138,19 +139,21 @@ void TcrConnectionManager::init(bool isPool) {
 }
 
 void TcrConnectionManager::startFailoverAndCleanupThreads(bool isPool) {
-  if (m_failoverTask == nullptr || m_cleanupTask == nullptr) {
+  if (!isPool && (m_failoverTask == nullptr || m_cleanupTask == nullptr)) {
     std::lock_guard<decltype(m_distMngrsLock)> _guard(m_distMngrsLock);
-    if (m_failoverTask == nullptr && !isPool) {
-      m_failoverTask = new Task<TcrConnectionManager>(
-          this, &TcrConnectionManager::failover, NC_Failover);
+    if (!m_failoverTask) {
+      m_failoverTask = std::unique_ptr<Task<TcrConnectionManager>>(
+          new Task<TcrConnectionManager>(this, &TcrConnectionManager::failover,
+                                         NC_Failover));
       m_failoverTask->start();
     }
-    if (m_cleanupTask == nullptr && !isPool) {
-      if (m_redundancyManager->m_HAenabled && !isPool) {
+    if (!m_cleanupTask) {
+      if (m_redundancyManager->m_HAenabled) {
         m_redundancyManager->startPeriodicAck();
       }
-      m_cleanupTask = new Task<TcrConnectionManager>(
-          this, &TcrConnectionManager::cleanup, NC_CleanUp);
+      m_cleanupTask = std::unique_ptr<Task<TcrConnectionManager>>(
+          new Task<TcrConnectionManager>(this, &TcrConnectionManager::cleanup,
+                                         NC_CleanUp));
       m_cleanupTask->start();
     }
   }
@@ -166,7 +169,7 @@ void TcrConnectionManager::close() {
     m_failoverTask->stopNoblock();
     m_failoverSema.release();
     m_failoverTask->wait();
-    _GEODE_SAFE_DELETE(m_failoverTask);
+    m_failoverTask = nullptr;
   }
 
   auto cacheAttributes = m_cache->getAttributes();
@@ -181,7 +184,7 @@ void TcrConnectionManager::close() {
       m_redundancyTask->wait();
       // now stop cleanup task
       // stopCleanupTask();
-      _GEODE_SAFE_DELETE(m_redundancyTask);
+      m_redundancyTask = nullptr;
     }
 
     m_redundancyManager->close();
@@ -204,22 +207,18 @@ TcrConnectionManager::~TcrConnectionManager() {
     m_cleanupTask->wait();
     // Clean notification lists if something remains in there; see bug #250
     cleanNotificationLists();
-    _GEODE_SAFE_DELETE(m_cleanupTask);
+    m_cleanupTask = nullptr;
 
     // sanity cleanup of any remaining endpoints with warning; see bug #298
     //  cleanup of endpoints, when regions are destroyed via notification
     {
-      ACE_Guard<ACE_Recursive_Thread_Mutex> guard(m_endpoints.mutex());
-
-      size_t numEndPoints = m_endpoints.current_size();
+      auto &&guard = m_endpoints.make_lock();
+      auto numEndPoints = m_endpoints.size();
       if (numEndPoints > 0) {
         LOGFINE("TCCM: endpoints remain in destructor");
       }
-      for (ACE_Map_Manager<std::string, TcrEndpoint *,
-                           ACE_Recursive_Thread_Mutex>::iterator iter =
-               m_endpoints.begin();
-           iter != m_endpoints.end(); ++iter) {
-        TcrEndpoint *ep = (*iter).int_id_;
+      for (const auto &iter : m_endpoints) {
+        auto ep = iter.second;
         LOGFINE("TCCM: forcing endpoint delete for %d in destructor",
                 ep->name().c_str());
         _GEODE_SAFE_DELETE(ep);
@@ -234,7 +233,7 @@ void TcrConnectionManager::connect(
     const std::unordered_set<std::string> &endpointStrs) {
   std::lock_guard<decltype(m_distMngrsLock)> guardDistMngrs(m_distMngrsLock);
   {
-    ACE_Guard<ACE_Recursive_Thread_Mutex> guard(m_endpoints.mutex());
+    auto &&endpointsGuard = m_endpoints.make_lock();
     int32_t numEndPoints = static_cast<int32_t>(endpointStrs.size());
 
     if (numEndPoints == 0) {
@@ -242,12 +241,9 @@ void TcrConnectionManager::connect(
           "TcrConnectionManager::connect(): Empty endpointstr vector "
           "passed to TCCM, will initialize endpoints list with all available "
           "endpoints (%d).",
-          m_endpoints.current_size());
-      for (ACE_Map_Manager<std::string, TcrEndpoint *,
-                           ACE_Recursive_Thread_Mutex>::iterator currItr =
-               m_endpoints.begin();
-           currItr != m_endpoints.end(); ++currItr) {
-        TcrEndpoint *ep = (*currItr).int_id_;
+          m_endpoints.size());
+      for (const auto &currItr : m_endpoints) {
+        auto ep = currItr.second;
         ep->setNumRegions(ep->numRegions() + 1);
         LOGFINER(
             "TCCM 2: incremented region reference count for endpoint %s "
@@ -256,10 +252,8 @@ void TcrConnectionManager::connect(
         endpoints.push_back(ep);
       }
     } else {
-      for (std::unordered_set<std::string>::const_iterator iter =
-               endpointStrs.begin();
-           iter != endpointStrs.end(); ++iter) {
-        TcrEndpoint *ep = addRefToTcrEndpoint(*iter, distMng);
+      for (const auto &iter : endpointStrs) {
+        auto ep = addRefToTcrEndpoint(iter, distMng);
         endpoints.push_back(ep);
       }
     }
@@ -283,14 +277,16 @@ void TcrConnectionManager::connect(
 TcrEndpoint *TcrConnectionManager::addRefToTcrEndpoint(std::string endpointName,
                                                        ThinClientBaseDM *dm) {
   TcrEndpoint *ep = nullptr;
-  /*
-  endpointName = Utils::convertHostToCanonicalForm(endpointName.c_str());
-  */
-  if (0 != m_endpoints.find(endpointName, ep)) {
+
+  auto &&guard = m_endpoints.make_lock();
+  const auto &find = m_endpoints.find(endpointName);
+  if (find == m_endpoints.end()) {
     // this endpoint does not exist
     ep = new TcrEndpoint(endpointName, m_cache, m_failoverSema, m_cleanupSema,
                          m_redundancySema, dm, false);
-    GF_R_ASSERT(0 == m_endpoints.bind(endpointName, ep));
+    m_endpoints.emplace(endpointName, ep);
+  } else {
+    ep = find->second;
   }
   ep->setNumRegions(ep->numRegions() + 1);
 
@@ -305,11 +301,8 @@ void TcrConnectionManager::disconnect(ThinClientBaseDM *distMng,
                                       bool keepEndpoints) {
   std::lock_guard<decltype(m_distMngrsLock)> guardDistMngrs(m_distMngrsLock);
   {
-    ACE_Guard<ACE_Recursive_Thread_Mutex> guard(m_endpoints.mutex());
-
-    int32_t numEndPoints = static_cast<int32_t>(endpoints.size());
-    for (int32_t i = 0; i < numEndPoints; ++i) {
-      TcrEndpoint *ep = endpoints[i];
+    auto &&guard = m_endpoints.make_lock();
+    for (const auto &ep : endpoints) {
       removeRefToEndpoint(ep, keepEndpoints);
     }
   }
@@ -331,7 +324,7 @@ bool TcrConnectionManager::removeRefToEndpoint(TcrEndpoint *ep,
 
   if (0 == ep->numRegions()) {
     // this endpoint no longer used
-    GF_R_ASSERT(0 == m_endpoints.unbind(ep->name(), ep));
+    m_endpoints.erase(ep->name());
     LOGFINE("delete endpoint %s", ep->name().c_str());
     _GEODE_SAFE_DELETE(ep);
     hasRemovedEndpoint = true;
@@ -346,15 +339,11 @@ int TcrConnectionManager::processEventIdMap(const ACE_Time_Value &currTime,
 
 int TcrConnectionManager::checkConnection(const ACE_Time_Value &,
                                           const void *) {
-  ACE_Guard<ACE_Recursive_Thread_Mutex> guard(m_endpoints.mutex());
-  ACE_Map_Manager<std::string, TcrEndpoint *,
-                  ACE_Recursive_Thread_Mutex>::iterator currItr =
-      m_endpoints.begin();
-  while (currItr != m_endpoints.end()) {
-    if ((*currItr).int_id_->connected() && !m_isNetDown) {
-      (*currItr).int_id_->pingServer();
+  auto &&guard = m_endpoints.make_lock();
+  for (const auto &currItr : m_endpoints) {
+    if (currItr.second->connected() && !m_isNetDown) {
+      currItr.second->pingServer();
     }
-    currItr++;
   }
   return 0;
 }
@@ -365,16 +354,15 @@ int TcrConnectionManager::checkRedundancy(const ACE_Time_Value &,
   return 0;
 }
 
-int TcrConnectionManager::failover(volatile bool &isRunning) {
+void TcrConnectionManager::failover(std::atomic<bool> &isRunning) {
   LOGFINE("TcrConnectionManager: starting failover thread");
   while (isRunning) {
     m_failoverSema.acquire();
     if (isRunning && !m_isNetDown) {
       try {
         std::lock_guard<decltype(m_distMngrsLock)> guard(m_distMngrsLock);
-        for (std::list<ThinClientBaseDM *>::iterator it = m_distMngrs.begin();
-             it != m_distMngrs.end(); ++it) {
-          (*it)->failover();
+        for (const auto &it : m_distMngrs) {
+          it->failover();
         }
         while (m_failoverSema.tryacquire() != -1) {
           ;
@@ -391,24 +379,18 @@ int TcrConnectionManager::failover(volatile bool &isRunning) {
     }
   }
   LOGFINE("TcrConnectionManager: ending failover thread");
-  return 0;
 }
 
 void TcrConnectionManager::getAllEndpoints(
     std::vector<TcrEndpoint *> &endpoints) {
-  ACE_Guard<ACE_Recursive_Thread_Mutex> guard(m_endpoints.mutex());
-
-  for (ACE_Map_Manager<std::string, TcrEndpoint *,
-                       ACE_Recursive_Thread_Mutex>::iterator currItr =
-           m_endpoints.begin();
-       currItr != m_endpoints.end(); currItr++) {
-    endpoints.push_back((*currItr).int_id_);
+  auto &&guard = m_endpoints.make_lock();
+  for (const auto &currItr : m_endpoints) {
+    endpoints.push_back(currItr.second);
   }
 }
 
 int32_t TcrConnectionManager::getNumEndPoints() {
-  ACE_Guard<ACE_Recursive_Thread_Mutex> guard(m_endpoints.mutex());
-  return static_cast<int32_t>(m_endpoints.current_size());
+  return static_cast<int32_t>(m_endpoints.size());
 }
 
 GfErrType TcrConnectionManager::registerInterestAllRegions(
@@ -470,12 +452,10 @@ void TcrConnectionManager::initializeHAEndpoints(const char *endpointsStr) {
 }
 
 void TcrConnectionManager::removeHAEndpoints() {
-  ACE_Guard<ACE_Recursive_Thread_Mutex> guard(m_endpoints.mutex());
-  ACE_Map_Manager<std::string, TcrEndpoint *,
-                  ACE_Recursive_Thread_Mutex>::iterator currItr =
-      m_endpoints.begin();
+  auto &&guard = m_endpoints.make_lock();
+  auto currItr = m_endpoints.begin();
   while (currItr != m_endpoints.end()) {
-    if (removeRefToEndpoint((*currItr).int_id_)) {
+    if (removeRefToEndpoint(currItr->second)) {
       currItr = m_endpoints.begin();
     } else {
       currItr++;
@@ -490,10 +470,9 @@ void TcrConnectionManager::netDown() {
   std::this_thread::sleep_for(std::chrono::seconds(15));
 
   {
-    ACE_Guard<ACE_Recursive_Thread_Mutex> guard(m_endpoints.mutex());
-
+    auto &&guard = m_endpoints.make_lock();
     for (auto &currItr : m_endpoints) {
-      currItr.int_id_->setConnectionStatus(false);
+      currItr.second->setConnectionStatus(false);
     }
   }
 
@@ -510,7 +489,7 @@ void TcrConnectionManager::revive() {
   std::this_thread::sleep_for(std::chrono::seconds(15));
 }
 
-int TcrConnectionManager::redundancy(volatile bool &isRunning) {
+void TcrConnectionManager::redundancy(std::atomic<bool> &isRunning) {
   LOGFINE("Starting subscription maintain redundancy thread.");
   while (isRunning) {
     m_redundancySema.acquire();
@@ -522,7 +501,6 @@ int TcrConnectionManager::redundancy(volatile bool &isRunning) {
     }
   }
   LOGFINE("Ending subscription maintain redundancy thread.");
-  return 0;
 }
 
 void TcrConnectionManager::addNotificationForDeletion(
@@ -534,7 +512,7 @@ void TcrConnectionManager::addNotificationForDeletion(
   m_notifyCleanupSemaList.put(&notifyCleanupSema);
 }
 
-int TcrConnectionManager::cleanup(volatile bool &isRunning) {
+void TcrConnectionManager::cleanup(std::atomic<bool> &isRunning) {
   LOGFINE("TcrConnectionManager: starting cleanup thread");
   do {
     //  If we block on acquire, the queue must be empty (precondition).
@@ -556,7 +534,6 @@ int TcrConnectionManager::cleanup(volatile bool &isRunning) {
   //  Postcondition - all notification channels should be cleaned up by the end
   //  of this function.
   GF_DEV_ASSERT(m_receiverReleaseList.size() == 0);
-  return 0;
 }
 
 void TcrConnectionManager::cleanNotificationLists() {
@@ -573,7 +550,7 @@ void TcrConnectionManager::cleanNotificationLists() {
       notifyCleanupSema = m_notifyCleanupSemaList.get();
     }
     notifyReceiver->wait();
-    _GEODE_SAFE_DELETE(notifyReceiver);
+    //_GEODE_SAFE_DELETE(notifyReceiver);
     _GEODE_SAFE_DELETE(notifyConnection);
     notifyCleanupSema->release();
   }
@@ -587,7 +564,7 @@ void TcrConnectionManager::processMarker() {
//  TESTING: Durable clients - return queue status of endpoint. Not thread safe.
 bool TcrConnectionManager::getEndpointStatus(const std::string &endpoint) {
   for (auto &currItr : m_endpoints) {
-    auto ep = currItr.int_id_;
+    auto ep = currItr.second;
     const std::string epName = ep->name();
     if (epName == endpoint) return ep->getServerQueueStatusTEST();
   }
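
[The recurring rewrite in this file: take the synchronized map's lock once with make_lock(), then iterate with range-for and ->second instead of ACE_Map_Manager iterators and (*iter).int_id_. A plain-standard-library rendering of the same shape; Endpoint here is a stub, not the Geode class:]

    #include <mutex>
    #include <string>
    #include <unordered_map>

    struct Endpoint {  // stub standing in for TcrEndpoint
      bool connected() const { return true; }
      void pingServer() {}
    };

    int main() {
      std::unordered_map<std::string, Endpoint*> endpoints;
      std::recursive_mutex mutex;  // the wrapper's mutex type in this file
      Endpoint ep;
      endpoints.emplace("host:40404", &ep);

      std::lock_guard<std::recursive_mutex> guard(mutex);
      for (const auto& entry : endpoints) {
        if (entry.second->connected()) {
          entry.second->pingServer();
        }
      }
      return 0;
    }
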
diff --git a/cppcache/src/TcrConnectionManager.hpp b/cppcache/src/TcrConnectionManager.hpp
index b5bfba9..837e448 100644
--- a/cppcache/src/TcrConnectionManager.hpp
+++ b/cppcache/src/TcrConnectionManager.hpp
@@ -26,10 +26,7 @@
 #include <unordered_map>
 #include <vector>
 
-#include <ace/Map_Manager.h>
-#include <ace/Recursive_Thread_Mutex.h>
 #include <ace/Semaphore.h>
-#include <ace/Versioned_Namespace.h>
 #include <ace/config-lite.h>
 
 #include <geode/internal/geode_globals.hpp>
@@ -38,6 +35,7 @@
 #include "Queue.hpp"
 #include "Task.hpp"
 #include "ThinClientRedundancyManager.hpp"
+#include "util/synchronized_map.hpp"
 
 namespace apache {
 namespace geode {
@@ -53,7 +51,7 @@ class ThinClientRegion;
 /**
  * @brief transport data between caches
  */
-class APACHE_GEODE_EXPORT TcrConnectionManager {
+class TcrConnectionManager {
  public:
   explicit TcrConnectionManager(CacheImpl* cache);
   ~TcrConnectionManager();
@@ -79,7 +77,8 @@ class APACHE_GEODE_EXPORT TcrConnectionManager {
   void setClientCrashTEST() { TEST_DURABLE_CLIENT_CRASH = true; }
   volatile static bool TEST_DURABLE_CLIENT_CRASH;
 
-  inline ACE_Map_Manager<std::string, TcrEndpoint*, ACE_Recursive_Thread_Mutex>&
+  inline synchronized_map<std::unordered_map<std::string, TcrEndpoint*>,
+                          std::recursive_mutex>&
   getGlobalEndpoints() {
     return m_endpoints;
   }
@@ -145,7 +144,8 @@ class APACHE_GEODE_EXPORT TcrConnectionManager {
  private:
   CacheImpl* m_cache;
   volatile bool m_initGuard;
-  ACE_Map_Manager<std::string, TcrEndpoint*, ACE_Recursive_Thread_Mutex>
+  synchronized_map<std::unordered_map<std::string, TcrEndpoint*>,
+                   std::recursive_mutex>
       m_endpoints;
   std::list<TcrEndpoint*> m_poolEndpointList;
 
@@ -154,7 +154,7 @@ class APACHE_GEODE_EXPORT TcrConnectionManager {
   std::recursive_mutex m_distMngrsLock;
 
   ACE_Semaphore m_failoverSema;
-  Task<TcrConnectionManager>* m_failoverTask;
+  std::unique_ptr<Task<TcrConnectionManager>> m_failoverTask;
 
   bool removeRefToEndpoint(TcrEndpoint* ep, bool keepEndpoint = false);
   TcrEndpoint* addRefToTcrEndpoint(std::string endpointName,
@@ -164,7 +164,7 @@ class APACHE_GEODE_EXPORT TcrConnectionManager {
   void removeHAEndpoints();
 
   ACE_Semaphore m_cleanupSema;
-  Task<TcrConnectionManager>* m_cleanupTask;
+  std::unique_ptr<Task<TcrConnectionManager>> m_cleanupTask;
 
   ExpiryTaskManager::id_type m_pingTaskId;
   ExpiryTaskManager::id_type m_servermonitorTaskId;
@@ -173,7 +173,7 @@ class APACHE_GEODE_EXPORT TcrConnectionManager {
   Queue<ACE_Semaphore*> m_notifyCleanupSemaList;
 
   ACE_Semaphore m_redundancySema;
-  Task<TcrConnectionManager>* m_redundancyTask;
+  std::unique_ptr<Task<TcrConnectionManager>> m_redundancyTask;
   std::recursive_mutex m_notificationLock;
   bool m_isDurable;
 
@@ -181,11 +181,11 @@ class APACHE_GEODE_EXPORT TcrConnectionManager {
 
   ThinClientRedundancyManager* m_redundancyManager;
 
-  int failover(volatile bool& isRunning);
-  int redundancy(volatile bool& isRunning);
+  void failover(std::atomic<bool>& isRunning);
+  void redundancy(std::atomic<bool>& isRunning);
 
   void cleanNotificationLists();
-  int cleanup(volatile bool& isRunning);
+  void cleanup(std::atomic<bool>& isRunning);
 
   // Disallow copy constructor and assignment operator.
   TcrConnectionManager(const TcrConnectionManager&);
diff --git a/cppcache/src/TcrEndpoint.cpp b/cppcache/src/TcrEndpoint.cpp
index 91d942a..7b57e5d 100644
--- a/cppcache/src/TcrEndpoint.cpp
+++ b/cppcache/src/TcrEndpoint.cpp
@@ -28,6 +28,7 @@
 #include "CacheImpl.hpp"
 #include "DistributedSystemImpl.hpp"
 #include "StackTrace.hpp"
+#include "TcrConnectionManager.hpp"
 #include "ThinClientPoolHADM.hpp"
 #include "ThinClientRegion.hpp"
 #include "Utils.hpp"
@@ -474,8 +475,9 @@ GfErrType TcrEndpoint::registerDM(bool clientNotification, bool isSecondary,
                   m_name.c_str());
           return err;
         }
-        m_notifyReceiver = new Task<TcrEndpoint>(
-            this, &TcrEndpoint::receiveNotification, NC_Notification);
+        m_notifyReceiver =
+            std::unique_ptr<Task<TcrEndpoint>>(new Task<TcrEndpoint>(
+                this, &TcrEndpoint::receiveNotification, NC_Notification));
         m_notifyReceiver->start();
       }
       ++m_numRegionListener;
@@ -578,7 +580,7 @@ bool TcrEndpoint::checkDupAndAdd(std::shared_ptr<EventId> eventid) {
   return m_cacheImpl->tcrConnectionManager().checkDupAndAdd(eventid);
 }
 
-int TcrEndpoint::receiveNotification(volatile bool& isRunning) {
+void TcrEndpoint::receiveNotification(std::atomic<bool>& isRunning) {
   LOGFINE("Started subscription channel for endpoint %s", m_name.c_str());
   while (isRunning) {
     TcrMessageReply* msg = nullptr;
@@ -730,7 +732,6 @@ int TcrEndpoint::receiveNotification(volatile bool& isRunning) {
     }
   }
   LOGFINE("Ended subscription channel for endpoint %s", m_name.c_str());
-  return 0;
 }
 
 inline bool TcrEndpoint::compareTransactionIds(int32_t reqTransId,
@@ -1239,7 +1240,7 @@ void TcrEndpoint::closeNotification() {
   m_notifyConnection->close();
   m_notifyReceiver->stopNoblock();
   TcrConnectionManager& tccm = m_cacheImpl->tcrConnectionManager();
-  tccm.addNotificationForDeletion(m_notifyReceiver, m_notifyConnection,
+  tccm.addNotificationForDeletion(m_notifyReceiver.get(), m_notifyConnection,
                                   m_notificationCleanupSema);
   m_notifyCount++;
   m_cleanupSema.release();
@@ -1266,46 +1267,29 @@ void TcrEndpoint::stopNotifyReceiverAndCleanup() {
     // m_notifyReceiver->stopNoblock();
     m_notifyReceiver->wait();
     bool found = false;
-    for (std::list<Task<TcrEndpoint>*>::iterator it =
-             m_notifyReceiverList.begin();
-         it != m_notifyReceiverList.end(); it++) {
-      if (*it == m_notifyReceiver) {
+    for (const auto& it : m_notifyReceiverList) {
+      if (it == m_notifyReceiver.get()) {
         found = true;
         break;
       }
     }
 
     if (!found) {
-      _GEODE_SAFE_DELETE(m_notifyReceiver);
+      m_notifyReceiver = nullptr;
       _GEODE_SAFE_DELETE(m_notifyConnection);
     }
   }
 
   m_numRegionListener = 0;
 
-  if (m_notifyReceiverList.size() > 0) {
-    LOGFINER("TcrEndpoint::stopNotifyReceiverAndCleanup: notifylist size = %d",
-             m_notifyReceiverList.size());
-    for (std::list<Task<TcrEndpoint>*>::iterator it =
-             m_notifyReceiverList.begin();
-         it != m_notifyReceiverList.end(); it++) {
-      LOGFINER(
-          "TcrEndpoint::stopNotifyReceiverAndCleanup: deleting old notify "
-          "recievers.");
-      _GEODE_SAFE_DELETE(*it);
-    }
-  }
-
-  if (m_notifyConnectionList.size() > 0) {
+  if (!m_notifyConnectionList.empty()) {
     LOGFINER("TcrEndpoint::stopNotifyReceiverAndCleanup: notifylist size = %d",
              m_notifyConnectionList.size());
-    for (std::list<TcrConnection*>::iterator it =
-             m_notifyConnectionList.begin();
-         it != m_notifyConnectionList.end(); it++) {
+    for (auto& it : m_notifyConnectionList) {
       LOGFINER(
           "TcrEndpoint::stopNotifyReceiverAndCleanup: deleting old notify "
           "connections.");
-      _GEODE_SAFE_DELETE(*it);
+      _GEODE_SAFE_DELETE(it);
     }
   }
 }
diff --git a/cppcache/src/TcrEndpoint.hpp b/cppcache/src/TcrEndpoint.hpp
index f5cd263..4413529 100644
--- a/cppcache/src/TcrEndpoint.hpp
+++ b/cppcache/src/TcrEndpoint.hpp
@@ -22,8 +22,10 @@
 
 #include <atomic>
 #include <list>
+#include <memory>
 #include <mutex>
 #include <string>
+#include <unordered_set>
 
 #include <ace/Condition_Recursive_Thread_Mutex.h>
 #include <ace/Semaphore.h>
@@ -33,9 +35,9 @@
 
 #include "ErrType.hpp"
 #include "FairQueue.hpp"
-#include "Set.hpp"
 #include "Task.hpp"
 #include "TcrConnection.hpp"
+#include "util/synchronized_set.hpp"
 
 namespace apache {
 namespace geode {
@@ -49,7 +51,7 @@ class ThinClientPoolHADM;
 class ThinClientPoolDM;
 class QueryService;
 
-class APACHE_GEODE_EXPORT TcrEndpoint {
+class TcrEndpoint {
  public:
   TcrEndpoint(
       const std::string& name, CacheImpl* cacheImpl,
@@ -74,7 +76,7 @@ class APACHE_GEODE_EXPORT TcrEndpoint {
   // void unregisterPoolDM(  );
 
   void pingServer(ThinClientPoolDM* poolDM = nullptr);
-  int receiveNotification(volatile bool& isRunning);
+  void receiveNotification(std::atomic<bool>& isRunning);
   GfErrType send(const TcrMessage& request, TcrMessageReply& reply);
   GfErrType sendRequestConn(const TcrMessage& request, TcrMessageReply& reply,
                             TcrConnection* conn, std::string& failReason);
@@ -189,7 +191,7 @@ class APACHE_GEODE_EXPORT TcrEndpoint {
 
  protected:
   TcrConnection* m_notifyConnection;
-  Task<TcrEndpoint>* m_notifyReceiver;
+  std::unique_ptr<Task<TcrEndpoint>> m_notifyReceiver;
   CacheImpl* m_cacheImpl;
   std::list<Task<TcrEndpoint>*> m_notifyReceiverList;
   std::list<TcrConnection*> m_notifyConnectionList;
@@ -231,7 +233,7 @@ class APACHE_GEODE_EXPORT TcrEndpoint {
   std::recursive_mutex m_connectionLock;
   std::recursive_mutex m_distMgrsLock;
   ACE_Semaphore m_notificationCleanupSema;
-  Set<uint16_t> m_ports;
+  synchronized_set<std::unordered_set<uint16_t>> m_ports;
   int32_t m_numberOfTimesFailed;
   int m_numRegions;
   int m_pingTimeouts;
diff --git a/cppcache/src/TcrHADistributionManager.cpp b/cppcache/src/TcrHADistributionManager.cpp
index 3ede87d..91dfa06 100644
--- a/cppcache/src/TcrHADistributionManager.cpp
+++ b/cppcache/src/TcrHADistributionManager.cpp
@@ -14,6 +14,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 #include "TcrHADistributionManager.hpp"
 
 #include <geode/ExceptionTypes.hpp>
@@ -21,6 +22,7 @@
 
 #include "CacheImpl.hpp"
 #include "RemoteQueryService.hpp"
+#include "TcrEndpoint.hpp"
 #include "TcrMessage.hpp"
 #include "ThinClientHARegion.hpp"
 #include "ThinClientRegion.hpp"
diff --git a/cppcache/src/TcrHADistributionManager.hpp b/cppcache/src/TcrHADistributionManager.hpp
index e0a91fe..fb293ae 100644
--- a/cppcache/src/TcrHADistributionManager.hpp
+++ b/cppcache/src/TcrHADistributionManager.hpp
@@ -1,8 +1,3 @@
-#pragma once
-
-#ifndef GEODE_TCRHADISTRIBUTIONMANAGER_H_
-#define GEODE_TCRHADISTRIBUTIONMANAGER_H_
-
 /*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
@@ -20,10 +15,15 @@
  * limitations under the License.
  */
 
+#pragma once
+
+#ifndef GEODE_TCRHADISTRIBUTIONMANAGER_H_
+#define GEODE_TCRHADISTRIBUTIONMANAGER_H_
+
 #include <geode/CacheAttributes.hpp>
 #include <geode/internal/geode_base.hpp>
 
-#include "TcrEndpoint.hpp"
+#include "TcrConnectionManager.hpp"
 #include "ThinClientDistributionManager.hpp"
 
 namespace apache {
@@ -32,6 +32,8 @@ namespace client {
 
 class ThinClientRegion;
 class ThinClientHARegion;
+class TcrEndpoint;
+
 /**
  * @brief Distribute data between caches
  */
diff --git a/cppcache/src/TcrMessage.cpp b/cppcache/src/TcrMessage.cpp
index 40a1b69..81f69d8 100644
--- a/cppcache/src/TcrMessage.cpp
+++ b/cppcache/src/TcrMessage.cpp
@@ -36,6 +36,7 @@
 #include "TXState.hpp"
 #include "TcrChunkedContext.hpp"
 #include "TcrConnection.hpp"
+#include "TcrConnectionManager.hpp"
 #include "ThinClientBaseDM.hpp"
 #include "ThinClientPoolDM.hpp"
 #include "ThinClientRegion.hpp"
diff --git a/cppcache/src/TcrMessage.hpp b/cppcache/src/TcrMessage.hpp
index 5a78846..46665c0 100644
--- a/cppcache/src/TcrMessage.hpp
+++ b/cppcache/src/TcrMessage.hpp
@@ -33,6 +33,7 @@
 #include <geode/DataInput.hpp>
 #include <geode/DataOutput.hpp>
 #include <geode/ExceptionTypes.hpp>
+#include <geode/Region.hpp>
 #include <geode/Serializable.hpp>
 #include <geode/internal/geode_globals.hpp>
 
diff --git a/cppcache/src/TcrPoolEndPoint.cpp b/cppcache/src/TcrPoolEndPoint.cpp
index db114f8..c3ff4c0 100644
--- a/cppcache/src/TcrPoolEndPoint.cpp
+++ b/cppcache/src/TcrPoolEndPoint.cpp
@@ -60,7 +60,7 @@ void TcrPoolEndPoint::closeNotification() {
   LOGFINE("TcrPoolEndPoint::closeNotification..");
   m_notifyReceiver->stopNoblock();
   m_notifyConnectionList.push_back(m_notifyConnection);
-  m_notifyReceiverList.push_back(m_notifyReceiver);
+  m_notifyReceiverList.push_back(m_notifyReceiver.get());
   m_isQueueHosted = false;
 }
 
@@ -98,8 +98,8 @@ GfErrType TcrPoolEndPoint::registerDM(bool, bool isSecondary, bool,
               name().c_str());
       return err;
     }
-    m_notifyReceiver = new Task<TcrEndpoint>(
-        this, &TcrEndpoint::receiveNotification, NC_Notification);
+    m_notifyReceiver = std::unique_ptr<Task<TcrEndpoint>>(new Task<TcrEndpoint>(
+        this, &TcrEndpoint::receiveNotification, NC_Notification));
     m_notifyReceiver->start();
   }
   ++m_numRegionListener;
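
With m_notifyReceiver now a std::unique_ptr, the list push above switches
to .get(): the unique_ptr stays the sole owner and the list holds
non-owning raw pointers. The explicit new rather than std::make_unique
suggests a C++11 target (make_unique arrived in C++14). In isolation
(Worker is a stand-in type, not from the diff):

    #include <list>
    #include <memory>

    struct Worker {};

    int main() {
      // Sole owner: the Worker is destroyed when 'owner' is reset or dies.
      auto owner = std::unique_ptr<Worker>(new Worker());  // pre-C++14 spelling

      // Non-owning observers: .get() hands out a raw pointer without
      // transferring ownership, mirroring m_notifyReceiverList above.
      std::list<Worker*> observers;
      observers.push_back(owner.get());
    }
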
diff --git a/cppcache/src/ThinClientBaseDM.cpp b/cppcache/src/ThinClientBaseDM.cpp
index 5d402b3..b062915 100644
--- a/cppcache/src/ThinClientBaseDM.cpp
+++ b/cppcache/src/ThinClientBaseDM.cpp
@@ -21,6 +21,7 @@
 
 #include <geode/AuthenticatedView.hpp>
 
+#include "TcrConnectionManager.hpp"
 #include "ThinClientRegion.hpp"
 #include "UserAttributes.hpp"
 
@@ -214,10 +215,10 @@ void ThinClientBaseDM::queueChunk(TcrChunkedContext* chunk) {
 }
 
 // the chunk processing thread
-int ThinClientBaseDM::processChunks(volatile bool& isRunning) {
+void ThinClientBaseDM::processChunks(std::atomic<bool>& isRunning) {
   TcrChunkedContext* chunk;
   LOGFINE("Starting chunk process thread for region %s",
-          (m_region != nullptr ? m_region->getFullPath().c_str() : "(null)"));
+          (m_region ? m_region->getFullPath().c_str() : "(null)"));
   while (isRunning) {
     chunk = m_chunks.getFor(std::chrono::microseconds(100000));
     if (chunk) {
@@ -226,27 +227,27 @@ int ThinClientBaseDM::processChunks(volatile bool& isRunning) {
     }
   }
   LOGFINE("Ending chunk process thread for region %s",
-          (m_region != nullptr ? m_region->getFullPath().c_str() : "(null)"));
+          (m_region ? m_region->getFullPath().c_str() : "(null)"));
   GF_DEV_ASSERT(m_chunks.size() == 0);
-  return 0;
 }
 
 // start the chunk processing thread
 void ThinClientBaseDM::startChunkProcessor() {
   if (m_chunkProcessor == nullptr) {
     m_chunks.open();
-    m_chunkProcessor = new Task<ThinClientBaseDM>(
-        this, &ThinClientBaseDM::processChunks, NC_ProcessChunk);
+    m_chunkProcessor =
+        std::unique_ptr<Task<ThinClientBaseDM>>(new Task<ThinClientBaseDM>(
+            this, &ThinClientBaseDM::processChunks, NC_ProcessChunk));
     m_chunkProcessor->start();
   }
 }
 
 // stop the chunk processing thread
 void ThinClientBaseDM::stopChunkProcessor() {
-  if (m_chunkProcessor != nullptr) {
+  if (m_chunkProcessor) {
     m_chunkProcessor->stop();
     m_chunks.close();
-    _GEODE_SAFE_DELETE(m_chunkProcessor);
+    m_chunkProcessor = nullptr;
   }
 }
 
@@ -330,6 +331,14 @@ GfErrType ThinClientBaseDM::registerInterestForRegion(TcrEndpoint*,
 
 bool ThinClientBaseDM::isEndpointAttached(TcrEndpoint*) { return false; }
 
+bool ThinClientBaseDM::checkDupAndAdd(std::shared_ptr<EventId> eventid) {
+  return m_connManager.checkDupAndAdd(eventid);
+}
+
+std::recursive_mutex& ThinClientBaseDM::getRedundancyLock() {
+  return m_connManager.getRedundancyLock();
+}
+
 }  // namespace client
 }  // namespace geode
 }  // namespace apache
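
processChunks now takes std::atomic<bool>& instead of volatile bool&:
volatile never guaranteed cross-thread visibility or ordering in C++,
while atomic loads and stores do, and the int return that every such loop
ended with "return 0;" is dropped. A minimal sketch of the run-flag
handshake (names here are illustrative, not from the codebase):

    #include <atomic>
    #include <chrono>
    #include <functional>
    #include <thread>

    void runLoop(std::atomic<bool>& isRunning) {
      while (isRunning) {  // implicit atomic load on every iteration
        // ... dequeue one chunk with a bounded wait, then handle it ...
        std::this_thread::sleep_for(std::chrono::milliseconds(100));
      }
    }

    int main() {
      std::atomic<bool> isRunning{true};
      std::thread worker(runLoop, std::ref(isRunning));  // atomics don't copy
      isRunning = false;  // atomic store; the worker is guaranteed to see it
      worker.join();
    }
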
diff --git a/cppcache/src/ThinClientBaseDM.hpp b/cppcache/src/ThinClientBaseDM.hpp
index 70a35ec..779f441 100644
--- a/cppcache/src/ThinClientBaseDM.hpp
+++ b/cppcache/src/ThinClientBaseDM.hpp
@@ -20,23 +20,30 @@
 #ifndef GEODE_THINCLIENTBASEDM_H_
 #define GEODE_THINCLIENTBASEDM_H_
 
+#include <memory>
 #include <vector>
 
 #include <geode/internal/geode_globals.hpp>
 
-#include "TcrConnectionManager.hpp"
-#include "TcrEndpoint.hpp"
+#include "Queue.hpp"
+#include "Task.hpp"
+#include "util/Log.hpp"
 
 namespace apache {
 namespace geode {
 namespace client {
 
-/**
- * @brief Distribute data between caches
- */
 class TcrMessage;
 class ThinClientRegion;
+class TcrEndpoint;
+class TcrConnectionManager;
+class TcrMessageReply;
+class TcrChunkedContext;
+class EventId;
 
+/**
+ * @brief Distribute data between caches
+ */
 class ThinClientBaseDM {
  public:
   ThinClientBaseDM(TcrConnectionManager& connManager, ThinClientRegion* region);
@@ -108,13 +115,9 @@ class ThinClientBaseDM {
 
   virtual TcrEndpoint* getActiveEndpoint() { return nullptr; }
 
-  virtual bool checkDupAndAdd(std::shared_ptr<EventId> eventid) {
-    return m_connManager.checkDupAndAdd(eventid);
-  }
+  virtual bool checkDupAndAdd(std::shared_ptr<EventId> eventid);
 
-  virtual std::recursive_mutex& getRedundancyLock() {
-    return m_connManager.getRedundancyLock();
-  }
+  virtual std::recursive_mutex& getRedundancyLock();
 
   static bool isDeltaEnabledOnServer() { return s_isDeltaEnabledOnServer; }
 
@@ -172,7 +175,7 @@ class ThinClientBaseDM {
   ThinClientRegion* m_region;
 
   // methods for the chunk processing thread
-  int processChunks(volatile bool& isRunning);
+  void processChunks(std::atomic<bool>& isRunning);
   void startChunkProcessor();
   void stopChunkProcessor();
 
@@ -193,7 +196,7 @@ class ThinClientBaseDM {
   bool m_clientNotification;
 
   Queue<TcrChunkedContext*> m_chunks;
-  Task<ThinClientBaseDM>* m_chunkProcessor;
+  std::unique_ptr<Task<ThinClientBaseDM>> m_chunkProcessor;
 
  private:
   static volatile bool s_isDeltaEnabledOnServer;
diff --git a/cppcache/src/ThinClientCacheDistributionManager.cpp b/cppcache/src/ThinClientCacheDistributionManager.cpp
index faba3bd..7a67a10 100644
--- a/cppcache/src/ThinClientCacheDistributionManager.cpp
+++ b/cppcache/src/ThinClientCacheDistributionManager.cpp
@@ -14,6 +14,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 #include "ThinClientCacheDistributionManager.hpp"
 
 #include <algorithm>
@@ -25,6 +26,7 @@
 #include "CacheImpl.hpp"
 #include "ReadWriteLock.hpp"
 #include "RemoteQueryService.hpp"
+#include "TcrConnectionManager.hpp"
 #include "TcrEndpoint.hpp"
 #include "TcrMessage.hpp"
 #include "ThinClientBaseDM.hpp"
@@ -134,8 +136,7 @@ bool ThinClientCacheDistributionManager::preFailoverAction() {
   //  take the global endpoint lock so that the global endpoints list
   // does not change while we are (possibly) adding endpoint to this endpoints
   // list and incrementing the reference count of endpoint
-  ACE_Guard<ACE_Recursive_Thread_Mutex> guard(
-      m_connManager.getGlobalEndpoints().mutex());
+  auto&& guard = m_connManager.getGlobalEndpoints().make_lock();
   //  This method is called at the time of failover to refresh
   // the list of endpoints.
   std::vector<TcrEndpoint*> currentGlobalEndpointsList;
@@ -143,23 +144,18 @@ bool ThinClientCacheDistributionManager::preFailoverAction() {
 
   //  Update local list with new endpoints.
   std::vector<TcrEndpoint*> newEndpointsList;
-  for (std::vector<TcrEndpoint*>::iterator it =
-           currentGlobalEndpointsList.begin();
-       it != currentGlobalEndpointsList.end(); ++it) {
+  for (const auto& it : currentGlobalEndpointsList) {
     bool found = false;
-    for (std::vector<TcrEndpoint*>::iterator currIter = m_endpoints.begin();
-         currIter != m_endpoints.end(); ++currIter) {
-      if (*currIter == *it) {
+    for (const auto& currIter : m_endpoints) {
+      if (currIter == it) {
         found = true;
         break;
       }
     }
-    if (!found) newEndpointsList.push_back(*it);
+    if (!found) newEndpointsList.push_back(it);
   }
 
-  for (std::vector<TcrEndpoint*>::iterator it = newEndpointsList.begin();
-       it != newEndpointsList.end(); ++it) {
-    TcrEndpoint* ep = *it;
+  for (const auto& ep : newEndpointsList) {
     m_endpoints.push_back(ep);
     ep->setNumRegions(ep->numRegions() + 1);
     LOGFINER(
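
The ACE_Guard in preFailoverAction becomes "auto&& guard = ...make_lock()":
the wrapper returns an RAII lock by value, the auto&& binding keeps it
alive to the end of the scope, and unlocking happens automatically on
every exit path. Sketched against a plain mutex (this make_lock is a local
stand-in, not the wrapper's real signature):

    #include <mutex>

    std::recursive_mutex endpointsMutex;

    std::unique_lock<std::recursive_mutex> make_lock() {
      return std::unique_lock<std::recursive_mutex>(endpointsMutex);
    }

    void updateEndpoints() {
      auto&& guard = make_lock();  // rvalue binding extends the lock's life
      // ... read and mutate the shared endpoint list here ...
    }  // guard's destructor unlocks, even if an exception is thrown
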
diff --git a/cppcache/src/ThinClientDistributionManager.cpp b/cppcache/src/ThinClientDistributionManager.cpp
index 5aec9e7..5bf8cfe 100644
--- a/cppcache/src/ThinClientDistributionManager.cpp
+++ b/cppcache/src/ThinClientDistributionManager.cpp
@@ -23,6 +23,8 @@
 #include <geode/SystemProperties.hpp>
 
 #include "DistributedSystemImpl.hpp"
+#include "TcrConnectionManager.hpp"
+#include "TcrEndpoint.hpp"
 #include "ThinClientRegion.hpp"
 #include "util/exception.hpp"
 
diff --git a/cppcache/src/ThinClientLocatorHelper.cpp b/cppcache/src/ThinClientLocatorHelper.cpp
index 153e339..e7174ce 100644
--- a/cppcache/src/ThinClientLocatorHelper.cpp
+++ b/cppcache/src/ThinClientLocatorHelper.cpp
@@ -32,6 +32,7 @@
 #include "QueueConnectionRequest.hpp"
 #include "QueueConnectionResponse.hpp"
 #include "TcpSslConn.hpp"
+#include "TcrConnectionManager.hpp"
 #include "ThinClientPoolDM.hpp"
 
 namespace apache {
diff --git a/cppcache/src/ThinClientLocatorHelper.hpp b/cppcache/src/ThinClientLocatorHelper.hpp
index 04bde66..c72f950 100644
--- a/cppcache/src/ThinClientLocatorHelper.hpp
+++ b/cppcache/src/ThinClientLocatorHelper.hpp
@@ -28,17 +28,17 @@
 #include <geode/internal/geode_globals.hpp>
 
 #include "ClientProxyMembershipID.hpp"
+#include "ErrType.hpp"
 #include "GetAllServersRequest.hpp"
 #include "GetAllServersResponse.hpp"
 #include "ServerLocation.hpp"
-#include "TcrEndpoint.hpp"
 
 namespace apache {
 namespace geode {
 namespace client {
 
-class TcrEndpoint;
 class ThinClientPoolDM;
+class Connector;
 
 class ThinClientLocatorHelper {
  public:
diff --git a/cppcache/src/ThinClientPoolDM.cpp b/cppcache/src/ThinClientPoolDM.cpp
index 59ba3ed..1e85003 100644
--- a/cppcache/src/ThinClientPoolDM.cpp
+++ b/cppcache/src/ThinClientPoolDM.cpp
@@ -31,6 +31,7 @@
 #include "ExpiryHandler_T.hpp"
 #include "ExpiryTaskManager.hpp"
 #include "NonCopyable.hpp"
+#include "TcrConnectionManager.hpp"
 #include "TcrEndpoint.hpp"
 #include "ThinClientRegion.hpp"
 #include "ThinClientStickyManager.hpp"
@@ -207,7 +208,8 @@ ThinClientPoolDM::ThinClientPoolDM(const char* name,
     }
   }
   if (m_attrs->getPRSingleHopEnabled()) {
-    m_clientMetadataService = new ClientMetadataService(this);
+    m_clientMetadataService =
+        std::unique_ptr<ClientMetadataService>(new ClientMetadataService(this));
   }
   m_manager = new ThinClientStickyManager(this);
 }
@@ -234,6 +236,14 @@ void ThinClientPoolDM::init() {
 
   LOGDEBUG("ThinClientPoolDM::init: Completed initialization");
 }
+
+ThinClientPoolDM::~ThinClientPoolDM() {
+  destroy();
+  _GEODE_SAFE_DELETE(m_locHelper);
+  _GEODE_SAFE_DELETE(m_stats);
+  _GEODE_SAFE_DELETE(m_manager);
+}
+
 std::shared_ptr<Properties> ThinClientPoolDM::getCredentials(TcrEndpoint* ep) {
   auto cacheImpl = m_connManager.getCacheImpl();
   const auto& distributedSystem = cacheImpl->getDistributedSystem();
@@ -256,8 +266,9 @@ std::shared_ptr<Properties> ThinClientPoolDM::getCredentials(TcrEndpoint* ep) {
 
 void ThinClientPoolDM::startBackgroundThreads() {
   LOGDEBUG("ThinClientPoolDM::startBackgroundThreads: Starting ping thread");
-  m_pingTask = new Task<ThinClientPoolDM>(this, &ThinClientPoolDM::pingServer,
-                                          NC_Ping_Thread);
+  m_pingTask =
+      std::unique_ptr<Task<ThinClientPoolDM>>(new Task<ThinClientPoolDM>(
+          this, &ThinClientPoolDM::pingServer, NC_Ping_Thread));
   m_pingTask->start();
 
   auto& props = m_connManager.getCacheImpl()
@@ -266,7 +277,8 @@ void ThinClientPoolDM::startBackgroundThreads() {
 
   if (props.onClientDisconnectClearPdxTypeIds() == true) {
     m_cliCallbackTask =
-        new Task<ThinClientPoolDM>(this, &ThinClientPoolDM::cliCallback);
+        std::unique_ptr<Task<ThinClientPoolDM>>(new Task<ThinClientPoolDM>(
+            this, &ThinClientPoolDM::cliCallback, "NC_cliCallback"));
     m_cliCallbackTask->start();
   }
 
@@ -293,7 +305,8 @@ void ThinClientPoolDM::startBackgroundThreads() {
 
   if (updateLocatorListInterval > 0) {
     m_updateLocatorListTask =
-        new Task<ThinClientPoolDM>(this, &ThinClientPoolDM::updateLocatorList);
+        std::unique_ptr<Task<ThinClientPoolDM>>(new Task<ThinClientPoolDM>(
+            this, &ThinClientPoolDM::updateLocatorList, "NC_LocatorList"));
     m_updateLocatorListTask->start();
 
     updateLocatorListInterval = updateLocatorListInterval / 1000;  // seconds
@@ -316,8 +329,9 @@ void ThinClientPoolDM::startBackgroundThreads() {
       "ThinClientPoolDM::startBackgroundThreads: Starting manageConnections "
       "thread");
   // Manage Connection Thread
-  m_connManageTask = new Task<ThinClientPoolDM>(
-      this, &ThinClientPoolDM::manageConnections, NC_MC_Thread);
+  m_connManageTask =
+      std::unique_ptr<Task<ThinClientPoolDM>>(new Task<ThinClientPoolDM>(
+          this, &ThinClientPoolDM::manageConnections, NC_MC_Thread));
   m_connManageTask->start();
 
   auto idle = getIdleTimeout();
@@ -354,11 +368,12 @@ void ThinClientPoolDM::startBackgroundThreads() {
 
   LOGDEBUG(
       "ThinClientPoolDM::startBackgroundThreads: Starting pool stat sampler");
-  if (m_PoolStatsSampler == nullptr &&
+  if (!m_PoolStatsSampler &&
       getStatisticInterval() > std::chrono::milliseconds::zero() &&
       props.statisticsEnabled()) {
-    m_PoolStatsSampler = new statistics::PoolStatsSampler(
-        getStatisticInterval(), m_connManager.getCacheImpl(), this);
+    m_PoolStatsSampler = std::unique_ptr<statistics::PoolStatsSampler>(
+        new statistics::PoolStatsSampler(getStatisticInterval(),
+                                         m_connManager.getCacheImpl(), this));
     m_PoolStatsSampler->start();
   }
 
@@ -369,7 +384,7 @@ void ThinClientPoolDM::startBackgroundThreads() {
     m_clientMetadataService->start();
   }
 }
-int ThinClientPoolDM::manageConnections(volatile bool& isRunning) {
+void ThinClientPoolDM::manageConnections(std::atomic<bool>& isRunning) {
   LOGFINE("ThinClientPoolDM: starting manageConnections thread");
 
   while (isRunning) {
@@ -382,10 +397,9 @@ int ThinClientPoolDM::manageConnections(volatile bool& isRunning) {
     }
   }
   LOGFINE("ThinClientPoolDM: ending manageConnections thread");
-  return 0;
 }
 
-void ThinClientPoolDM::cleanStaleConnections(volatile bool& isRunning) {
+void ThinClientPoolDM::cleanStaleConnections(std::atomic<bool>& isRunning) {
   if (!isRunning) {
     return;
   }
@@ -488,9 +502,10 @@ void ThinClientPoolDM::cleanStaleConnections(volatile bool& isRunning) {
 
   LOGDEBUG("Pool size is %d, pool counter is %d", size(), m_poolSize.load());
 }
-void ThinClientPoolDM::cleanStickyConnections(volatile bool&) {}
 
-void ThinClientPoolDM::restoreMinConnections(volatile bool& isRunning) {
+void ThinClientPoolDM::cleanStickyConnections(std::atomic<bool>&) {}
+
+void ThinClientPoolDM::restoreMinConnections(std::atomic<bool>& isRunning) {
   if (!isRunning) {
     return;
   }
@@ -522,7 +537,7 @@ void ThinClientPoolDM::restoreMinConnections(volatile bool& isRunning) {
   LOGDEBUG("Pool size is %d, pool counter is %d", size(), m_poolSize.load());
 }
 
-int ThinClientPoolDM::manageConnectionsInternal(volatile bool& isRunning) {
+void ThinClientPoolDM::manageConnectionsInternal(std::atomic<bool>& isRunning) {
   try {
     LOGFINE(
         "ThinClientPoolDM::manageConnections(): checking connections in pool "
@@ -543,7 +558,6 @@ int ThinClientPoolDM::manageConnectionsInternal(volatile bool& isRunning) {
   } catch (...) {
     LOGERROR("Unexpected exception during manage connections");
   }
-  return 0;
 }
 
 std::string ThinClientPoolDM::selectEndpoint(
@@ -608,6 +622,7 @@ void ThinClientPoolDM::addConnection(TcrConnection* conn) {
   put(conn, false);
   ++m_poolSize;
 }
+
 GfErrType ThinClientPoolDM::sendRequestToAllServers(
     const char* func, uint8_t getResult, std::chrono::milliseconds timeout,
     std::shared_ptr<Cacheable> args, std::shared_ptr<ResultCollector>& rs,
@@ -625,30 +640,30 @@ GfErrType ThinClientPoolDM::sendRequestToAllServers(
     return GF_NOSERVER_FOUND;
   }
 
-  int feIndex = 0;
-  FunctionExecution* fePtrList = new FunctionExecution[csArray->length()];
-  auto threadPool = m_connManager.getCacheImpl()->getThreadPool();
+  std::vector<std::shared_ptr<FunctionExecution>> fePtrList;
+  fePtrList.reserve(csArray->length());
+  auto& threadPool = m_connManager.getCacheImpl()->getThreadPool();
   auto userAttr = UserAttributes::threadLocalUserAttributes;
   for (int i = 0; i < csArray->length(); i++) {
     auto cs = (*csArray)[i];
-    std::string endpointStr(cs->value().c_str());
-    TcrEndpoint* ep = nullptr;
-    if (m_endpoints.find(endpointStr, ep)) {
-      ep = addEP(cs->value().c_str());
+    auto endpointStr = cs->value();
+    auto ep = getEndpoint(endpointStr);
+    if (!ep) {
+      ep = addEP(cs->value());
     } else if (!ep->connected()) {
       LOGFINE(
           "ThinClientPoolDM::sendRequestToAllServers server not connected %s ",
           cs->value().c_str());
     }
-    FunctionExecution* funcExe = &fePtrList[feIndex++];
+    auto funcExe = std::make_shared<FunctionExecution>();
     funcExe->setParameters(func, getResult, timeout, args, ep, this,
                            resultCollectorLock, &rs, userAttr);
-    threadPool->perform(funcExe);
+    fePtrList.push_back(funcExe);
+    threadPool.perform(funcExe);
   }
   GfErrType finalErrorReturn = GF_NOERR;
 
-  for (int i = 0; i < feIndex; i++) {
-    FunctionExecution* funcExe = &fePtrList[i];
+  for (auto& funcExe : fePtrList) {
     err = funcExe->getResult();
     if (err != GF_NOERR) {
       if (funcExe->getException() == nullptr) {
@@ -688,7 +703,6 @@ GfErrType ThinClientPoolDM::sendRequestToAllServers(
   getStats().setCurClientOps(--m_clientOps);
   getStats().incSucceedClientOps();
 
-  delete[] fePtrList;
   return finalErrorReturn;
 }
 
@@ -738,7 +752,7 @@ void ThinClientPoolDM::stopPingThread() {
     m_pingTask->stopNoblock();
     m_pingSema.release();
     m_pingTask->wait();
-    _GEODE_SAFE_DELETE(m_pingTask);
+    m_pingTask = nullptr;
     if (m_pingTaskId >= 0) {
       m_connManager.getCacheImpl()->getExpiryTaskManager().cancelTask(
           m_pingTaskId);
@@ -752,7 +766,7 @@ void ThinClientPoolDM::stopUpdateLocatorListThread() {
     m_updateLocatorListTask->stopNoblock();
     m_updateLocatorListSema.release();
     m_updateLocatorListTask->wait();
-    _GEODE_SAFE_DELETE(m_updateLocatorListTask);
+    m_updateLocatorListTask = nullptr;
     if (m_updateLocatorListTaskId >= 0) {
       m_connManager.getCacheImpl()->getExpiryTaskManager().cancelTask(
           m_updateLocatorListTaskId);
@@ -766,7 +780,7 @@ void ThinClientPoolDM::stopCliCallbackThread() {
     m_cliCallbackTask->stopNoblock();
     m_cliCallbackSema.release();
     m_cliCallbackTask->wait();
-    _GEODE_SAFE_DELETE(m_cliCallbackTask);
+    m_cliCallbackTask = nullptr;
   }
 }
 
@@ -781,21 +795,21 @@ void ThinClientPoolDM::destroy(bool keepAlive) {
     }
 
     LOGDEBUG("Closing PoolStatsSampler thread.");
-    if (m_PoolStatsSampler != nullptr) {
+    if (m_PoolStatsSampler) {
       m_PoolStatsSampler->stop();
-      _GEODE_SAFE_DELETE(m_PoolStatsSampler);
+      m_PoolStatsSampler = nullptr;
     }
     LOGDEBUG("PoolStatsSampler thread closed .");
     stopCliCallbackThread();
     LOGDEBUG("ThinClientPoolDM::destroy( ): Closing connection manager.");
+    auto cacheImpl = m_connManager.getCacheImpl();
     if (m_connManageTask) {
       m_connManageTask->stopNoblock();
       m_connSema.release();
       m_connManageTask->wait();
-      _GEODE_SAFE_DELETE(m_connManageTask);
+      m_connManageTask = nullptr;
       if (m_connManageTaskId >= 0) {
-        m_connManager.getCacheImpl()->getExpiryTaskManager().cancelTask(
-            m_connManageTaskId);
+        cacheImpl->getExpiryTaskManager().cancelTask(m_connManageTaskId);
       }
     }
 
@@ -803,8 +817,9 @@ void ThinClientPoolDM::destroy(bool keepAlive) {
     stopPingThread();
     stopUpdateLocatorListThread();
 
-    if (m_clientMetadataService != nullptr) {
+    if (m_clientMetadataService) {
       m_clientMetadataService->stop();
+      // m_clientMetadataService = nullptr;
     }
     // closing all the thread local connections ( sticky).
     LOGDEBUG("ThinClientPoolDM::destroy( ): closing FairQueue, pool size = %d",
@@ -812,8 +827,8 @@ void ThinClientPoolDM::destroy(bool keepAlive) {
     close();
     LOGDEBUG("ThinClientPoolDM::destroy( ): after close ");
 
-    for (auto& iter : m_endpoints) {
-      auto ep = iter.int_id_;
+    for (const auto& iter : m_endpoints) {
+      auto ep = iter.second;
       LOGFINE("ThinClientPoolDM: forcing endpoint delete for %d in destructor",
               ep->name().c_str());
       _GEODE_SAFE_DELETE(ep);
@@ -821,14 +836,9 @@ void ThinClientPoolDM::destroy(bool keepAlive) {
 
     // Close Stats
     getStats().close();
-    m_connManager.getCacheImpl()->getStatisticsManager().forceSample();
+    cacheImpl->getStatisticsManager().forceSample();
 
-    if (m_clientMetadataService != nullptr) {
-      _GEODE_SAFE_DELETE(m_clientMetadataService);
-    }
-
-    m_connManager.getCacheImpl()->getPoolManager().removePool(
-        m_poolName.c_str());
+    cacheImpl->getPoolManager().removePool(m_poolName);
 
     stopChunkProcessor();
     m_manager->closeAllStickyConnections();
@@ -1179,7 +1189,8 @@ TcrEndpoint* ThinClientPoolDM::getEndPoint(
       return ep;
     }
 
-    if (m_endpoints.find(serverLocation->getEpString(), ep) != -1) {
+    ep = getEndpoint(serverLocation->getEpString());
+    if (ep) {
       LOGDEBUG("Endpoint for single hop is %p", ep);
       return ep;
     }
@@ -1223,14 +1234,13 @@ TcrEndpoint* ThinClientPoolDM::getEndPoint(
   return ep;
 }
 
-// gets the endpoint from the list of endpoints using the endpoint Name
-TcrEndpoint* ThinClientPoolDM::getEndPoint(std::string epNameStr) {
-  TcrEndpoint* ep = nullptr;
-  if (m_endpoints.find(epNameStr, ep) != -1) {
-    LOGDEBUG("Endpoint for single hop is %p", ep);
-    return ep;
+TcrEndpoint* ThinClientPoolDM::getEndpoint(const std::string& endpointName) {
+  auto&& guard = m_endpoints.make_lock();
+  const auto& find = m_endpoints.find(endpointName);
+  if (find == m_endpoints.end()) {
+    return nullptr;
   }
-  return ep;
+  return find->second;
 }
 
 GfErrType ThinClientPoolDM::sendSyncRequest(TcrMessage& request,
@@ -1256,9 +1266,9 @@ GfErrType ThinClientPoolDM::sendSyncRequest(TcrMessage& request,
       return sendSyncRequest(request, reply, attemptFailover, isBGThread,
                              nullptr);
     }
-    std::vector<GetAllWork*> getAllWorkers;
-    auto* threadPool = m_connManager.getCacheImpl()->getThreadPool();
-    ChunkedGetAllResponse* responseHandler =
+    std::vector<std::shared_ptr<GetAllWork>> getAllWorkers;
+    auto& threadPool = m_connManager.getCacheImpl()->getThreadPool();
+    auto responseHandler =
         static_cast<ChunkedGetAllResponse*>(reply.getChunkedResultHandler());
 
     for (const auto& locationIter : *locationMap) {
@@ -1266,18 +1276,16 @@ GfErrType ThinClientPoolDM::sendSyncRequest(TcrMessage& request,
       if (serverLocation == nullptr) {
       }
       const auto& keys = locationIter.second;
-      auto worker =
-          new GetAllWork(this, region, serverLocation, keys, attemptFailover,
-                         isBGThread, responseHandler->getAddToLocalCache(),
-                         responseHandler, request.getCallbackArgument());
-      threadPool->perform(worker);
+      auto worker = std::make_shared<GetAllWork>(
+          this, region, serverLocation, keys, attemptFailover, isBGThread,
+          responseHandler->getAddToLocalCache(), responseHandler,
+          request.getCallbackArgument());
+      threadPool.perform(worker);
       getAllWorkers.push_back(worker);
     }
     reply.setMessageType(TcrMessage::RESPONSE);
 
-    for (std::vector<GetAllWork*>::iterator iter = getAllWorkers.begin();
-         iter != getAllWorkers.end(); iter++) {
-      GetAllWork* worker = *iter;
+    for (auto& worker : getAllWorkers) {
       GfErrType err = worker->getResult();
 
       if (err != GF_NOERR) {
@@ -1288,8 +1296,6 @@ GfErrType ThinClientPoolDM::sendSyncRequest(TcrMessage& request,
       if (currentReply->getMessageType() != TcrMessage::RESPONSE) {
         reply.setMessageType(currentReply->getMessageType());
       }
-
-      delete worker;
     }
     return error;
   } else {
@@ -1617,7 +1623,7 @@ GfErrType ThinClientPoolDM::getConnectionToAnEndPoint(std::string epNameStr,
   conn = nullptr;
 
   GfErrType error = GF_NOERR;
-  auto theEP = getEndPoint(epNameStr);
+  auto theEP = getEndpoint(epNameStr);
 
   LOGFINE(
       "ThinClientPoolDM::getConnectionToAnEndPoint( ): Getting endpoint object "
@@ -1792,9 +1798,9 @@ GfErrType ThinClientPoolDM::createPoolConnection(
       LOGFINE("Endpoint selection failed");
       return GF_NOTCON;
     }
+
     LOGFINE("Connecting to %s", epNameStr.c_str());
-    TcrEndpoint* ep = nullptr;
-    ep = addEP(epNameStr.c_str());
+    auto ep = addEP(epNameStr);
 
     if (currentserver != nullptr &&
         epNameStr == currentserver->getEndpointObject()->name()) {
@@ -2040,24 +2046,23 @@ TcrEndpoint* ThinClientPoolDM::addEP(ServerLocation& serverLoc) {
   return addEP(endpointName);
 }
 
-TcrEndpoint* ThinClientPoolDM::addEP(const char* endpointName) {
+TcrEndpoint* ThinClientPoolDM::addEP(const std::string& endpointName) {
   std::lock_guard<decltype(m_endpointsLock)> guard(m_endpointsLock);
-  TcrEndpoint* ep = nullptr;
 
-  std::string fullName = endpointName;
-  if (m_endpoints.find(fullName, ep)) {
-    LOGFINE("Created new endpoint %s for pool %s", fullName.c_str(),
+  auto ep = getEndpoint(endpointName);
+  if (!ep) {
+    LOGFINE("Created new endpoint %s for pool %s", endpointName.c_str(),
             m_poolName.c_str());
-    ep = createEP(fullName.c_str());
-    if (m_endpoints.bind(fullName, ep)) {
-      LOGERROR("Failed to add endpoint %s to pool %s", fullName.c_str(),
+    ep = createEP(endpointName.c_str());
+    if (!m_endpoints.emplace(endpointName, ep).second) {
+      LOGERROR("Failed to add endpoint %s to pool %s", endpointName.c_str(),
                m_poolName.c_str());
       GF_DEV_ASSERT(
           "ThinClientPoolDM::addEP( ): failed to add endpoint" ? false : false);
     }
   }
   // Update Server Stats
-  getStats().setServers(static_cast<int32_t>(m_endpoints.current_size()));
+  getStats().setServers(static_cast<int32_t>(m_endpoints.size()));
   return ep;
 }
 
@@ -2071,17 +2076,18 @@ void ThinClientPoolDM::pingServerLocal() {
   ACE_Guard<ACE_Recursive_Thread_Mutex> _guard(getPoolLock());
   std::lock_guard<decltype(m_endpointsLock)> guard(m_endpointsLock);
   for (auto& it : m_endpoints) {
-    if (it.int_id_->connected()) {
-      it.int_id_->pingServer(this);
-      if (!it.int_id_->connected()) {
-        removeEPConnections(it.int_id_);
-        removeCallbackConnection(it.int_id_);
+    auto endpoint = it.second;
+    if (endpoint->connected()) {
+      endpoint->pingServer(this);
+      if (!endpoint->connected()) {
+        removeEPConnections(endpoint);
+        removeCallbackConnection(endpoint);
       }
     }
   }
 }
 
-int ThinClientPoolDM::updateLocatorList(volatile bool& isRunning) {
+void ThinClientPoolDM::updateLocatorList(std::atomic<bool>& isRunning) {
   LOGFINE("Starting updateLocatorList thread for pool %s", m_poolName.c_str());
   while (isRunning) {
     m_updateLocatorListSema.acquire();
@@ -2090,10 +2096,9 @@ int ThinClientPoolDM::updateLocatorList(volatile bool& isRunning) {
     }
   }
   LOGFINE("Ending updateLocatorList thread for pool %s", m_poolName.c_str());
-  return 0;
 }
 
-int ThinClientPoolDM::pingServer(volatile bool& isRunning) {
+void ThinClientPoolDM::pingServer(std::atomic<bool>& isRunning) {
   LOGFINE("Starting ping thread for pool %s", m_poolName.c_str());
   while (isRunning) {
     m_pingSema.acquire();
@@ -2105,10 +2110,9 @@ int ThinClientPoolDM::pingServer(volatile bool& isRunning) {
     }
   }
   LOGFINE("Ending ping thread for pool %s", m_poolName.c_str());
-  return 0;
 }
 
-int ThinClientPoolDM::cliCallback(volatile bool& isRunning) {
+void ThinClientPoolDM::cliCallback(std::atomic<bool>& isRunning) {
   LOGFINE("Starting cliCallback thread for pool %s", m_poolName.c_str());
   while (isRunning) {
     m_cliCallbackSema.acquire();
@@ -2125,7 +2129,6 @@ int ThinClientPoolDM::cliCallback(volatile bool& isRunning) {
     }
   }
   LOGFINE("Ending cliCallback thread for pool %s", m_poolName.c_str());
-  return 0;
 }
 
 int ThinClientPoolDM::doPing(const ACE_Time_Value&, const void*) {
@@ -2459,6 +2462,106 @@ TcrConnection* ThinClientPoolDM::getConnectionFromQueueW(
   return conn;
 }
 
+bool ThinClientPoolDM::checkDupAndAdd(std::shared_ptr<EventId> eventid) {
+  return m_connManager.checkDupAndAdd(eventid);
+}
+
+TcrEndpoint* ThinClientPoolDM::createEP(const char* endpointName) {
+  return new TcrPoolEndPoint(
+      endpointName, m_connManager.getCacheImpl(), m_connManager.m_failoverSema,
+      m_connManager.m_cleanupSema, m_connManager.m_redundancySema, this);
+}
+
+GfErrType FunctionExecution::execute(void) {
+  GuardUserAttributes gua;
+
+  if (m_userAttr) {
+    gua.setAuthenticatedView(m_userAttr->getAuthenticatedView());
+  }
+
+  std::string funcName(m_func);
+  TcrMessageExecuteFunction request(
+      new DataOutput(
+          m_poolDM->getConnectionManager().getCacheImpl()->createDataOutput()),
+      funcName, m_args, m_getResult, m_poolDM, m_timeout);
+  TcrMessageReply reply(true, m_poolDM);
+  ChunkedFunctionExecutionResponse* resultProcessor(
+      new ChunkedFunctionExecutionResponse(reply, (m_getResult & 2) == 2, *m_rc,
+                                           m_resultCollectorLock));
+  reply.setChunkedResultHandler(resultProcessor);
+  reply.setTimeout(m_timeout);
+  reply.setDM(m_poolDM);
+
+  LOGDEBUG(
+      "ThinClientPoolDM::sendRequestToAllServer sendRequest on endpoint[%s]!",
+      m_ep->name().c_str());
+
+  m_error = m_poolDM->sendRequestToEP(request, reply, m_ep);
+  m_error = m_poolDM->handleEPError(m_ep, reply, m_error);
+  if (m_error != GF_NOERR) {
+    if (m_error == GF_NOTCON || m_error == GF_IOERR) {
+      delete resultProcessor;
+      resultProcessor = nullptr;
+      return GF_NOERR;  // if server is unavailable it's not an error for
+      // functionexec OnServers() case
+    }
+    LOGDEBUG("FunctionExecution::execute failed on endpoint[%s]!. Error = %d ",
+             m_ep->name().c_str(), m_error);
+    if (reply.getMessageType() == TcrMessage::EXCEPTION) {
+      exceptionPtr = CacheableString::create(reply.getException());
+    }
+
+    delete resultProcessor;
+    resultProcessor = nullptr;
+    return m_error;
+  } else if (reply.getMessageType() == TcrMessage::EXCEPTION ||
+             reply.getMessageType() == TcrMessage::EXECUTE_FUNCTION_ERROR) {
+    m_error = ThinClientRegion::handleServerException("Execute",
+                                                      reply.getException());
+    exceptionPtr = CacheableString::create(reply.getException());
+  }
+  if (resultProcessor->getResult() == true) {
+  }
+  delete resultProcessor;
+  resultProcessor = nullptr;
+  return m_error;
+}
+
+OnRegionFunctionExecution::OnRegionFunctionExecution(
+    std::string func, const Region* region, std::shared_ptr<Cacheable> args,
+    std::shared_ptr<CacheableHashSet> routingObj, uint8_t getResult,
+    std::chrono::milliseconds timeout, ThinClientPoolDM* poolDM,
+    const std::shared_ptr<std::recursive_mutex>& rCL,
+    std::shared_ptr<ResultCollector> rs,
+    std::shared_ptr<UserAttributes> userAttr, bool isBGThread,
+    const std::shared_ptr<BucketServerLocation>& serverLocation,
+    bool allBuckets)
+    : m_serverLocation(serverLocation),
+      m_isBGThread(isBGThread),
+      m_poolDM(poolDM),
+      m_func(func),
+      m_getResult(getResult),
+      m_timeout(timeout),
+      m_args(args),
+      m_routingObj(routingObj),
+      m_rc(rs),
+      m_resultCollectorLock(rCL),
+      m_userAttr(userAttr),
+      m_region(region),
+      m_allBuckets(allBuckets) {
+  m_request = new TcrMessageExecuteRegionFunctionSingleHop(
+      new DataOutput(
+          m_poolDM->getConnectionManager().getCacheImpl()->createDataOutput()),
+      m_func, m_region, m_args, m_routingObj, m_getResult, nullptr,
+      m_allBuckets, timeout, m_poolDM);
+  m_reply = new TcrMessageReply(true, m_poolDM);
+  m_resultCollector = new ChunkedFunctionExecutionResponse(
+      *m_reply, (m_getResult & 2) == 2, m_rc, m_resultCollectorLock);
+  m_reply->setChunkedResultHandler(m_resultCollector);
+  m_reply->setTimeout(m_timeout);
+  m_reply->setDM(m_poolDM);
+}
+
 }  // namespace client
 }  // namespace geode
 }  // namespace apache
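
The sendRequestToAllServers rewrite above replaces a raw
"new FunctionExecution[n]" array with a vector of std::shared_ptr work
items handed to the thread pool, so the submitter's delete[] disappears
and the pool can safely share ownership of each item. The shape of that
pattern, reduced to a toy pool (Work and Pool are stand-ins):

    #include <memory>
    #include <vector>

    struct Work {
      int getResult() const { return 0; }  // stand-in for GfErrType
    };

    struct Pool {
      void perform(std::shared_ptr<Work> w) { queued.push_back(std::move(w)); }
      std::vector<std::shared_ptr<Work>> queued;
    };

    int fanOut(Pool& pool, int n) {
      std::vector<std::shared_ptr<Work>> workers;
      workers.reserve(n);
      for (int i = 0; i < n; ++i) {
        auto w = std::make_shared<Work>();
        workers.push_back(w);
        pool.perform(w);  // pool shares ownership; no delete[] on any path
      }
      int firstError = 0;
      for (auto& w : workers) {
        if (int err = w->getResult()) firstError = err;  // collect results
      }
      return firstError;
    }
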
diff --git a/cppcache/src/ThinClientPoolDM.hpp b/cppcache/src/ThinClientPoolDM.hpp
index ecd53c9..79e7506 100644
--- a/cppcache/src/ThinClientPoolDM.hpp
+++ b/cppcache/src/ThinClientPoolDM.hpp
@@ -27,14 +27,12 @@
 #include <string>
 #include <vector>
 
-#include <ace/Map_Manager.h>
 #include <ace/Recursive_Thread_Mutex.h>
 #include <ace/Semaphore.h>
 
 #include <geode/Pool.hpp>
 #include <geode/ResultCollector.hpp>
 
-#include "ClientMetadataService.hpp"
 #include "ExecutionImpl.hpp"
 #include "FairQueue.hpp"
 #include "NonCopyable.hpp"
@@ -64,20 +62,8 @@ namespace client {
 
 class CacheImpl;
 class FunctionExecution;
+class ClientMetadataService;
 
-/* adongre
- * CID 28731: Other violation (MISSING_COPY)
- * Class "apache::geode::client::ThinClientPoolDM" owns resources that are
- managed in its
- * constructor and destructor but has no user-written copy constructor.
- * FIX : Make the class no Copyablez
-
- * CID 28717: Other violation (MISSING_ASSIGN)
- * Class "apache::geode::client::ThinClientPoolDM" owns resources that are
- * managed in its constructor and destructor but has no user-written assignment
- operator.
- * Fix : Make the class Non Assinable
- */
 class ThinClientPoolDM
     : public ThinClientBaseDM,
       public Pool,
@@ -88,56 +74,49 @@ class ThinClientPoolDM
   ThinClientPoolDM(const char* name, std::shared_ptr<PoolAttributes> poolAttrs,
                    TcrConnectionManager& connManager);
 
-  virtual void init();
+  void init() override;
 
-  const std::string& getName() const { return m_poolName; }
+  const std::string& getName() const override { return m_poolName; }
 
-  virtual GfErrType sendSyncRequest(TcrMessage& request, TcrMessageReply& reply,
-                                    bool attemptFailover = true,
-                                    bool isBGThread = false);
-  GfErrType sendSyncRequest(
+  GfErrType sendSyncRequest(TcrMessage& request, TcrMessageReply& reply,
+                            bool attemptFailover = true,
+                            bool isBGThread = false) override;
+  virtual GfErrType sendSyncRequest(
       TcrMessage& request, TcrMessageReply& reply, bool attemptFailover,
       bool isBGThread,
       const std::shared_ptr<BucketServerLocation>& serverLocation);
 
   // Pool Specific Fns.
-  virtual const std::shared_ptr<CacheableStringArray> getLocators() const;
-  virtual const std::shared_ptr<CacheableStringArray> getServers();
-  virtual void destroy(bool keepalive = false);
-  virtual bool isDestroyed() const;
-  virtual std::shared_ptr<QueryService> getQueryService();
+  const std::shared_ptr<CacheableStringArray> getLocators() const override;
+  const std::shared_ptr<CacheableStringArray> getServers() override;
+  void destroy(bool keepalive = false) override;
+  bool isDestroyed() const override;
+  std::shared_ptr<QueryService> getQueryService() override;
   virtual std::shared_ptr<QueryService> getQueryServiceWithoutCheck();
-  virtual bool isEndpointAttached(TcrEndpoint* ep);
+  bool isEndpointAttached(TcrEndpoint* ep) override;
   GfErrType sendRequestToAllServers(
       const char* func, uint8_t getResult, std::chrono::milliseconds timeout,
       std::shared_ptr<Cacheable> args, std::shared_ptr<ResultCollector>& rs,
       std::shared_ptr<CacheableString>& exceptionPtr);
 
   GfErrType sendRequestToEP(const TcrMessage& request, TcrMessageReply& reply,
-                            TcrEndpoint* currentEndpoint);
+                            TcrEndpoint* currentEndpoint) override;
   void addConnection(TcrConnection* conn);
 
   TcrEndpoint* addEP(ServerLocation& serverLoc);
 
-  TcrEndpoint* addEP(const char* endpointName);
-  virtual int pingServer(volatile bool& isRunning);
-  virtual int updateLocatorList(volatile bool& isRunning);
-  virtual int cliCallback(volatile bool& isRunning);
+  TcrEndpoint* addEP(const std::string& endpointName);
+  virtual void pingServer(std::atomic<bool>& isRunning);
+  virtual void updateLocatorList(std::atomic<bool>& isRunning);
+  virtual void cliCallback(std::atomic<bool>& isRunning);
   virtual void pingServerLocal();
 
-  virtual ~ThinClientPoolDM() {
-    destroy();
-    _GEODE_SAFE_DELETE(m_locHelper);
-    _GEODE_SAFE_DELETE(m_stats);
-    _GEODE_SAFE_DELETE(m_clientMetadataService);
-    _GEODE_SAFE_DELETE(m_manager);
-  }
+  ~ThinClientPoolDM() override;
+
   // void updateQueue(const char* regionPath) ;
   ClientProxyMembershipID* getMembershipId() { return m_memId.get(); }
   virtual void processMarker(){};
-  virtual bool checkDupAndAdd(std::shared_ptr<EventId> eventid) {
-    return m_connManager.checkDupAndAdd(eventid);
-  }
+  bool checkDupAndAdd(std::shared_ptr<EventId> eventid) override;
   ACE_Recursive_Thread_Mutex& getPoolLock() { return getQueueLock(); }
   void reducePoolSize(int num);
   void removeEPConnections(int numConn, bool triggerManagerConn = true);
@@ -147,7 +126,7 @@ class ThinClientPoolDM
                                  bool& maxConnLimit,
                                  const TcrConnection* currentServer = nullptr);
   ThinClientLocatorHelper* getLocatorHelper() { return m_locHelper; }
-  virtual void releaseThreadLocalConnection();
+  void releaseThreadLocalConnection() override;
   virtual void setThreadLocalConnection(TcrConnection* conn);
   bool excludeConnection(TcrConnection*, std::set<ServerLocation>&);
   void incRegionCount();
@@ -162,15 +141,15 @@ class ThinClientPoolDM
   void updateNotificationStats(bool isDeltaSuccess,
                                std::chrono::nanoseconds timeInNanoSecond);
 
-  virtual bool isSecurityOn() { return m_isSecurityOn || m_isMultiUserMode; }
+  bool isSecurityOn() override { return m_isSecurityOn || m_isMultiUserMode; }
 
-  virtual bool isMultiUserMode() { return m_isMultiUserMode; }
+  bool isMultiUserMode() override { return m_isMultiUserMode; }
 
   virtual void sendUserCacheCloseMessage(bool keepAlive);
 
   virtual inline PoolStats& getStats() { return *m_stats; }
 
-  size_t getNumberOfEndPoints() const { return m_endpoints.current_size(); }
+  size_t getNumberOfEndPoints() const override { return m_endpoints.size(); }
 
   int32_t GetPDXIdForType(std::shared_ptr<Serializable> pdxType);
 
@@ -196,7 +175,7 @@ class ThinClientPoolDM
       int8_t& version, std::set<ServerLocation>& excludeServers);
 
   ClientMetadataService* getClientMetaDataService() {
-    return m_clientMetadataService;
+    return m_clientMetadataService.get();
   }
   void setPrimaryServerQueueSize(int queueSize) {
     m_primaryServerQueueSize = queueSize;
@@ -206,15 +185,14 @@ class ThinClientPoolDM
  protected:
   ThinClientStickyManager* m_manager;
   std::vector<std::string> m_canonicalHosts;
-  ACE_Map_Manager<std::string, TcrEndpoint*, ACE_Recursive_Thread_Mutex>
+  synchronized_map<std::unordered_map<std::string, TcrEndpoint*>,
+                   std::recursive_mutex>
       m_endpoints;
   std::recursive_mutex m_endpointsLock;
   std::recursive_mutex m_endpointSelectionLock;
   std::string m_poolName;
   PoolStats* m_stats;
   bool m_sticky;
-  // PoolStats * m_stats;
-  // PoolStatType* m_poolStatType;
   void netDown();
   ACE_Semaphore m_updateLocatorListSema;
   ACE_Semaphore m_pingSema;
@@ -228,7 +206,7 @@ class ThinClientPoolDM
   virtual void stopPingThread();
   virtual void stopUpdateLocatorListThread();
   virtual void stopCliCallbackThread();
-  virtual void cleanStickyConnections(volatile bool& isRunning);
+  virtual void cleanStickyConnections(std::atomic<bool>& isRunning);
   virtual TcrConnection* getConnectionFromQueue(bool timeout, GfErrType* error,
                                                 std::set<ServerLocation>&,
                                                 bool& maxConnLimit);
@@ -268,7 +246,7 @@ class ThinClientPoolDM
       std::shared_ptr<UserAttributes> userAttribute);
 
   // get endpoint using the endpoint string
-  TcrEndpoint* getEndPoint(std::string epNameStr);
+  TcrEndpoint* getEndpoint(const std::string& epNameStr);
 
   bool m_isSecurityOn;
   bool m_isMultiUserMode;
@@ -291,18 +269,13 @@ class ThinClientPoolDM
                               std::set<ServerLocation>& excludeServers,
                               bool& maxConnLimit);
   bool exclude(TcrConnection* conn, std::set<ServerLocation>& excludeServers);
-  void deleteAction() { removeEPConnections(1); }
+  void deleteAction() override { removeEPConnections(1); }
 
   std::string selectEndpoint(std::set<ServerLocation>&,
                              const TcrConnection* currentServer = nullptr);
   // TODO global - m_memId was volatile
   std::unique_ptr<ClientProxyMembershipID> m_memId;
-  virtual TcrEndpoint* createEP(const char* endpointName) {
-    return new TcrPoolEndPoint(endpointName, m_connManager.getCacheImpl(),
-                               m_connManager.m_failoverSema,
-                               m_connManager.m_cleanupSema,
-                               m_connManager.m_redundancySema, this);
-  }
+  virtual TcrEndpoint* createEP(const char* endpointName);
   virtual void removeCallbackConnection(TcrEndpoint*) {}
 
   bool excludeServer(std::string, std::set<ServerLocation>&);
@@ -317,23 +290,23 @@ class ThinClientPoolDM
 
   // Manage Connection thread
   ACE_Semaphore m_connSema;
-  Task<ThinClientPoolDM>* m_connManageTask;
-  Task<ThinClientPoolDM>* m_pingTask;
-  Task<ThinClientPoolDM>* m_updateLocatorListTask;
-  Task<ThinClientPoolDM>* m_cliCallbackTask;
+  std::unique_ptr<Task<ThinClientPoolDM>> m_connManageTask;
+  std::unique_ptr<Task<ThinClientPoolDM>> m_pingTask;
+  std::unique_ptr<Task<ThinClientPoolDM>> m_updateLocatorListTask;
+  std::unique_ptr<Task<ThinClientPoolDM>> m_cliCallbackTask;
   ExpiryTaskManager::id_type m_pingTaskId;
   ExpiryTaskManager::id_type m_updateLocatorListTaskId;
   ExpiryTaskManager::id_type m_connManageTaskId;
-  int manageConnections(volatile bool& isRunning);
+  void manageConnections(std::atomic<bool>& isRunning);
   int doPing(const ACE_Time_Value&, const void*);
   int doUpdateLocatorList(const ACE_Time_Value&, const void*);
   int doManageConnections(const ACE_Time_Value&, const void*);
-  int manageConnectionsInternal(volatile bool& isRunning);
-  void cleanStaleConnections(volatile bool& isRunning);
-  void restoreMinConnections(volatile bool& isRunning);
+  void manageConnectionsInternal(std::atomic<bool>& isRunning);
+  void cleanStaleConnections(std::atomic<bool>& isRunning);
+  void restoreMinConnections(std::atomic<bool>& isRunning);
   std::atomic<int32_t> m_clientOps;  // Actual Size of Pool
-  statistics::PoolStatsSampler* m_PoolStatsSampler;
-  ClientMetadataService* m_clientMetadataService;
+  std::unique_ptr<statistics::PoolStatsSampler> m_PoolStatsSampler;
+  std::unique_ptr<ClientMetadataService> m_clientMetadataService;
   friend class CacheImpl;
   friend class ThinClientStickyManager;
   friend class FunctionExecution;
@@ -389,67 +362,9 @@ class FunctionExecution : public PooledWork<GfErrType> {
     m_ep = ep;
     m_poolDM = poolDM;
     m_userAttr = userAttr;
-
-    // m_functionExecutionTask = new Task<FunctionExecution>(this,
-    //&FunctionExecution::execute);
   }
 
-  GfErrType execute(void) {
-    GuardUserAttributes gua;
-
-    if (m_userAttr) {
-      gua.setAuthenticatedView(m_userAttr->getAuthenticatedView());
-    }
-
-    std::string funcName(m_func);
-    TcrMessageExecuteFunction request(
-        new DataOutput(m_poolDM->getConnectionManager()
-                           .getCacheImpl()
-                           ->createDataOutput()),
-        funcName, m_args, m_getResult, m_poolDM, m_timeout);
-    TcrMessageReply reply(true, m_poolDM);
-    ChunkedFunctionExecutionResponse* resultProcessor(
-        new ChunkedFunctionExecutionResponse(reply, (m_getResult & 2) == 2,
-                                             *m_rc, m_resultCollectorLock));
-    reply.setChunkedResultHandler(resultProcessor);
-    reply.setTimeout(m_timeout);
-    reply.setDM(m_poolDM);
-
-    LOGDEBUG(
-        "ThinClientPoolDM::sendRequestToAllServer sendRequest on endpoint[%s]!",
-        m_ep->name().c_str());
-
-    m_error = m_poolDM->sendRequestToEP(request, reply, m_ep);
-    m_error = m_poolDM->handleEPError(m_ep, reply, m_error);
-    if (m_error != GF_NOERR) {
-      if (m_error == GF_NOTCON || m_error == GF_IOERR) {
-        delete resultProcessor;
-        resultProcessor = nullptr;
-        return GF_NOERR;  // if server is unavailable its not an error for
-                          // functionexec OnServers() case
-      }
-      LOGDEBUG(
-          "FunctionExecution::execute failed on endpoint[%s]!. Error = %d ",
-          m_ep->name().c_str(), m_error);
-      if (reply.getMessageType() == TcrMessage::EXCEPTION) {
-        exceptionPtr = CacheableString::create(reply.getException());
-      }
-
-      delete resultProcessor;
-      resultProcessor = nullptr;
-      return m_error;
-    } else if (reply.getMessageType() == TcrMessage::EXCEPTION ||
-               reply.getMessageType() == TcrMessage::EXECUTE_FUNCTION_ERROR) {
-      m_error = ThinClientRegion::handleServerException("Execute",
-                                                        reply.getException());
-      exceptionPtr = CacheableString::create(reply.getException());
-    }
-    if (resultProcessor->getResult() == true) {
-    }
-    delete resultProcessor;
-    resultProcessor = nullptr;
-    return m_error;
-  }
+  GfErrType execute(void);
 };
 
 class OnRegionFunctionExecution : public PooledWork<GfErrType> {
@@ -479,33 +394,7 @@ class OnRegionFunctionExecution : public PooledWork<GfErrType> {
       std::shared_ptr<ResultCollector> rs,
       std::shared_ptr<UserAttributes> userAttr, bool isBGThread,
       const std::shared_ptr<BucketServerLocation>& serverLocation,
-      bool allBuckets)
-      : m_serverLocation(serverLocation),
-        m_isBGThread(isBGThread),
-        m_poolDM(poolDM),
-        m_func(func),
-        m_getResult(getResult),
-        m_timeout(timeout),
-        m_args(args),
-        m_routingObj(routingObj),
-        m_rc(rs),
-        m_resultCollectorLock(rCL),
-        m_userAttr(userAttr),
-        m_region(region),
-        m_allBuckets(allBuckets) {
-    m_request = new TcrMessageExecuteRegionFunctionSingleHop(
-        new DataOutput(m_poolDM->getConnectionManager()
-                           .getCacheImpl()
-                           ->createDataOutput()),
-        m_func, m_region, m_args, m_routingObj, m_getResult, nullptr,
-        m_allBuckets, timeout, m_poolDM);
-    m_reply = new TcrMessageReply(true, m_poolDM);
-    m_resultCollector = new ChunkedFunctionExecutionResponse(
-        *m_reply, (m_getResult & 2) == 2, m_rc, m_resultCollectorLock);
-    m_reply->setChunkedResultHandler(m_resultCollector);
-    m_reply->setTimeout(m_timeout);
-    m_reply->setDM(m_poolDM);
-  }
+      bool allBuckets);
 
   ~OnRegionFunctionExecution() {
     delete m_request;
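
Most of the header churn above is mechanical: every overriding virtual
gains the override keyword, which turns an accidental signature mismatch
into a compile error instead of a silently unrelated second virtual. For
example (Base and Derived are illustrative):

    struct Base {
      virtual void destroy(bool keepAlive = false) { (void)keepAlive; }
      virtual ~Base() = default;
    };

    struct Derived : Base {
      // If Base::destroy's signature ever drifts, this line stops compiling
      // instead of quietly introducing a new, unrelated virtual function.
      void destroy(bool keepAlive = false) override { Base::destroy(keepAlive); }
    };
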
diff --git a/cppcache/src/ThinClientPoolHADM.cpp b/cppcache/src/ThinClientPoolHADM.cpp
index 2caf6d0..244fd2e 100644
--- a/cppcache/src/ThinClientPoolHADM.cpp
+++ b/cppcache/src/ThinClientPoolHADM.cpp
@@ -20,6 +20,7 @@
 #include <geode/SystemProperties.hpp>
 
 #include "ExpiryHandler_T.hpp"
+#include "TcrConnectionManager.hpp"
 #include "util/exception.hpp"
 
 namespace apache {
@@ -80,8 +81,9 @@ void ThinClientPoolHADM::startBackgroundThreads() {
   }
 
   m_redundancyManager->startPeriodicAck();
-  m_redundancyTask = new Task<ThinClientPoolHADM>(
-      this, &ThinClientPoolHADM::redundancy, NC_Redundancy);
+  m_redundancyTask =
+      std::unique_ptr<Task<ThinClientPoolHADM>>(new Task<ThinClientPoolHADM>(
+          this, &ThinClientPoolHADM::redundancy, NC_Redundancy));
   m_redundancyTask->start();
 }
 
@@ -142,7 +144,7 @@ bool ThinClientPoolHADM::postFailoverAction(TcrEndpoint*) {
   return true;
 }
 
-int ThinClientPoolHADM::redundancy(volatile bool& isRunning) {
+void ThinClientPoolHADM::redundancy(std::atomic<bool>& isRunning) {
   LOGFINE("ThinClientPoolHADM: Starting maintain redundancy thread.");
   while (isRunning) {
     m_redundancySema.acquire();
@@ -154,7 +156,6 @@ int ThinClientPoolHADM::redundancy(volatile bool& isRunning) {
     }
   }
   LOGFINE("ThinClientPoolHADM: Ending maintain redundancy thread.");
-  return 0;
 }
 
 int ThinClientPoolHADM::checkRedundancy(const ACE_Time_Value&, const void*) {
@@ -194,7 +195,7 @@ void ThinClientPoolHADM::sendNotificationCloseMsgs() {
     m_redundancyTask->stopNoblock();
     m_redundancySema.release();
     m_redundancyTask->wait();
-    _GEODE_SAFE_DELETE(m_redundancyTask);
+    m_redundancyTask = nullptr;
     m_redundancyManager->sendNotificationCloseMsgs();
   }
 }
@@ -216,6 +217,36 @@ GfErrType ThinClientPoolHADM::registerInterestAllRegions(
   return err;
 }
 
+bool ThinClientPoolHADM::checkDupAndAdd(std::shared_ptr<EventId> eventid) {
+  return m_redundancyManager->checkDupAndAdd(eventid);
+}
+
+void ThinClientPoolHADM::processMarker() {
+  // also set the static bool m_processedMarker for makePrimary messages
+  m_redundancyManager->m_globalProcessedMarker = true;
+}
+
+void ThinClientPoolHADM::acquireRedundancyLock() {
+  m_redundancyManager->acquireRedundancyLock();
+}
+
+void ThinClientPoolHADM::releaseRedundancyLock() {
+  m_redundancyManager->releaseRedundancyLock();
+}
+
+std::recursive_mutex& ThinClientPoolHADM::getRedundancyLock() {
+  return m_redundancyManager->getRedundancyLock();
+}
+
+GfErrType ThinClientPoolHADM::sendRequestToPrimary(TcrMessage& request,
+                                                   TcrMessageReply& reply) {
+  return m_redundancyManager->sendRequestToPrimary(request, reply);
+}
+
+bool ThinClientPoolHADM::isReadyForEvent() const {
+  return m_redundancyManager->isSentReadyForEvents();
+}
+
 void ThinClientPoolHADM::addRegion(ThinClientRegion* theTCR) {
   std::lock_guard<decltype(m_regionsLock)> guard(m_regionsLock);
   m_regions.push_back(theTCR);
@@ -258,7 +289,7 @@ void ThinClientPoolHADM::netDown() {
   {
     std::lock_guard<decltype(m_endpointsLock)> guard(m_endpointsLock);
     for (auto&& currItr : m_endpoints) {
-      currItr.int_id_->setConnectionStatus(false);
+      currItr.second->setConnectionStatus(false);
     }
   }
 
@@ -283,6 +314,12 @@ void ThinClientPoolHADM::sendNotConMesToAllregions() {
   }
 }
 
+TcrEndpoint* ThinClientPoolHADM::createEP(const char* endpointName) {
+  return new TcrPoolEndPoint(
+      endpointName, m_connManager.getCacheImpl(), m_connManager.m_failoverSema,
+      m_connManager.m_cleanupSema, m_redundancySema, this);
+}
+
 }  // namespace client
 }  // namespace geode
 }  // namespace apache
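
The teardown in sendNotificationCloseMsgs keeps its ordering, stopNoblock,
release the semaphore, wait, drop the pointer, but the final step is now
just "m_redundancyTask = nullptr" because the unique_ptr deletes the Task.
The same shutdown dance with standard primitives, a condition variable
standing in for ACE_Semaphore (a sketch, not the Task implementation):

    #include <atomic>
    #include <chrono>
    #include <condition_variable>
    #include <mutex>
    #include <thread>

    std::atomic<bool> running{true};
    std::mutex m;
    std::condition_variable wakeup;

    void redundancyLoop() {
      std::unique_lock<std::mutex> lock(m);
      while (running) {
        // Like m_redundancySema.acquire(): sleep until work or shutdown.
        wakeup.wait_for(lock, std::chrono::seconds(1));
        // ... maintain redundancy ...
      }
    }

    int main() {
      std::thread task(redundancyLoop);
      running = false;      // stopNoblock(): ask the loop to exit
      wakeup.notify_one();  // release(): kick it out of its wait
      task.join();          // wait(): join before the owner goes away
    }
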
diff --git a/cppcache/src/ThinClientPoolHADM.hpp b/cppcache/src/ThinClientPoolHADM.hpp
index 7ebd9cb..a430b3f 100644
--- a/cppcache/src/ThinClientPoolHADM.hpp
+++ b/cppcache/src/ThinClientPoolHADM.hpp
@@ -20,10 +20,12 @@
 #ifndef GEODE_THINCLIENTPOOLHADM_H_
 #define GEODE_THINCLIENTPOOLHADM_H_
 
+#include <atomic>
+#include <memory>
 #include <mutex>
 
 #include "PoolAttributes.hpp"
-#include "TcrConnectionManager.hpp"
+#include "Task.hpp"
 #include "ThinClientHARegion.hpp"
 #include "ThinClientPoolDM.hpp"
 
@@ -31,18 +33,22 @@ namespace apache {
 namespace geode {
 namespace client {
 
+class TcrConnectionManager;
+class ThinClientRedundancyManager;
+
 class ThinClientPoolHADM : public ThinClientPoolDM {
  public:
   ThinClientPoolHADM(const char* name, std::shared_ptr<PoolAttributes> poolAttr,
                      TcrConnectionManager& connManager);
+  ThinClientPoolHADM(const ThinClientPoolHADM&) = delete;
+  ThinClientPoolHADM& operator=(const ThinClientPoolHADM&) = delete;
+  ~ThinClientPoolHADM() override { destroy(); }
 
-  void init();
-
-  virtual ~ThinClientPoolHADM() { destroy(); }
+  void init() override;
 
-  virtual GfErrType sendSyncRequest(TcrMessage& request, TcrMessageReply& reply,
-                                    bool attemptFailover = true,
-                                    bool isBGThread = false);
+  GfErrType sendSyncRequest(TcrMessage& request, TcrMessageReply& reply,
+                            bool attemptFailover = true,
+                            bool isBGThread = false) override;
 
   bool registerInterestForHARegion(TcrEndpoint* ep, const TcrMessage* request,
                                    ThinClientHARegion& region);
@@ -50,55 +56,43 @@ class ThinClientPoolHADM : public ThinClientPoolDM {
   GfErrType sendSyncRequestRegisterInterestEP(TcrMessage& request,
                                               TcrMessageReply& reply,
                                               bool attemptFailover,
-                                              TcrEndpoint* endpoint);
+                                              TcrEndpoint* endpoint) override;
 
   GfErrType registerInterestAllRegions(TcrEndpoint* ep,
                                        const TcrMessage* request,
                                        TcrMessageReply* reply);
 
-  virtual void destroy(bool keepAlive = false);
+  virtual void destroy(bool keepAlive = false) override;
 
   void readyForEvents();
 
   void sendNotificationCloseMsgs();
 
-  bool checkDupAndAdd(std::shared_ptr<EventId> eventid) {
-    return m_redundancyManager->checkDupAndAdd(eventid);
-  }
+  bool checkDupAndAdd(std::shared_ptr<EventId> eventid) override;
 
-  void processMarker() {
-    // also set the static bool m_processedMarker for makePrimary messages
-    m_redundancyManager->m_globalProcessedMarker = true;
-  }
+  void processMarker() override;
 
   void netDown();
 
-  void pingServerLocal();
+  void pingServerLocal() override;
+
+  void acquireRedundancyLock() override;
 
-  virtual void acquireRedundancyLock() {
-    m_redundancyManager->acquireRedundancyLock();
-  };
-  virtual void releaseRedundancyLock() {
-    m_redundancyManager->releaseRedundancyLock();
-  };
-  virtual std::recursive_mutex& getRedundancyLock() {
-    return m_redundancyManager->getRedundancyLock();
-  }
+  void releaseRedundancyLock() override;
 
-  GfErrType sendRequestToPrimary(TcrMessage& request, TcrMessageReply& reply) {
-    return m_redundancyManager->sendRequestToPrimary(request, reply);
-  }
+  std::recursive_mutex& getRedundancyLock() override;
 
-  virtual void triggerRedundancyThread() { m_redundancySema.release(); }
+  GfErrType sendRequestToPrimary(TcrMessage& request, TcrMessageReply& reply);
 
-  bool isReadyForEvent() const {
-    return m_redundancyManager->isSentReadyForEvents();
-  }
+  void triggerRedundancyThread() override { m_redundancySema.release(); }
+
+  bool isReadyForEvent() const;
 
  protected:
-  virtual GfErrType sendSyncRequestRegisterInterest(
+  GfErrType sendSyncRequestRegisterInterest(
       TcrMessage& request, TcrMessageReply& reply, bool attemptFailover = true,
-      ThinClientRegion* region = nullptr, TcrEndpoint* endpoint = nullptr);
+      ThinClientRegion* region = nullptr,
+      TcrEndpoint* endpoint = nullptr) override;
 
   virtual GfErrType sendSyncRequestCq(TcrMessage& request,
                                       TcrMessageReply& reply);
@@ -107,33 +101,23 @@ class ThinClientPoolHADM : public ThinClientPoolDM {
 
   virtual bool postFailoverAction(TcrEndpoint* endpoint);
 
-  virtual void startBackgroundThreads();
+  void startBackgroundThreads() override;
 
  private:
-  // Disallow copy constructor and assignment operator.
   ThinClientRedundancyManager* m_redundancyManager;
-  ThinClientPoolHADM(const ThinClientPoolHADM&);
-  ThinClientPoolHADM& operator=(const ThinClientPoolHADM&) = delete;
 
   TcrConnectionManager& m_theTcrConnManager;
   ACE_Semaphore m_redundancySema;
-  Task<ThinClientPoolHADM>* m_redundancyTask;
+  std::unique_ptr<Task<ThinClientPoolHADM>> m_redundancyTask;
+
+  void redundancy(std::atomic<bool>& isRunning);
 
-  int redundancy(volatile bool& isRunning);
-  /*
-  void stopNotificationThreads();
-  */
   ExpiryTaskManager::id_type m_servermonitorTaskId;
   int checkRedundancy(const ACE_Time_Value&, const void*);
 
-  virtual TcrEndpoint* createEP(const char* endpointName) {
-    return new TcrPoolEndPoint(endpointName, m_connManager.getCacheImpl(),
-                               m_connManager.m_failoverSema,
-                               m_connManager.m_cleanupSema, m_redundancySema,
-                               this);
-  }
+  TcrEndpoint* createEP(const char* endpointName) override;
 
-  void removeCallbackConnection(TcrEndpoint*);
+  void removeCallbackConnection(TcrEndpoint*) override;
 
   std::list<ThinClientRegion*> m_regions;
   std::recursive_mutex m_regionsLock;
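
The redundancy task's run flag moves from volatile bool& to std::atomic<bool>& here and in the Task wrapper. A minimal sketch of why that matters, with hypothetical names: volatile only defeats compiler caching, while the atomic load/store pair also gives the loop the cross-thread visibility it relies on.

    #include <atomic>
    #include <chrono>
    #include <thread>

    // Hypothetical worker mirroring the run-flag idiom: the loop condition is
    // an atomic load, so the stopping thread's store is guaranteed visible.
    void redundancyLoop(std::atomic<bool>& isRunning) {
      while (isRunning) {
        // ... one round of redundancy maintenance ...
        std::this_thread::sleep_for(std::chrono::milliseconds(100));
      }
    }

    int main() {
      std::atomic<bool> isRunning{true};
      std::thread worker(redundancyLoop, std::ref(isRunning));
      isRunning = false;  // sequentially consistent store; worker observes it
      worker.join();
    }
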
diff --git a/cppcache/src/ThinClientPoolStickyDM.cpp b/cppcache/src/ThinClientPoolStickyDM.cpp
index be61f56..ac7c29b 100644
--- a/cppcache/src/ThinClientPoolStickyDM.cpp
+++ b/cppcache/src/ThinClientPoolStickyDM.cpp
@@ -131,7 +131,8 @@ void ThinClientPoolStickyDM::setStickyNull(bool isBGThread) {
   }
 }
 
-void ThinClientPoolStickyDM::cleanStickyConnections(volatile bool& isRunning) {
+void ThinClientPoolStickyDM::cleanStickyConnections(
+    std::atomic<bool>& isRunning) {
   if (!isRunning) {
     return;
   }
diff --git a/cppcache/src/ThinClientPoolStickyDM.hpp b/cppcache/src/ThinClientPoolStickyDM.hpp
index b46d358..869c212 100644
--- a/cppcache/src/ThinClientPoolStickyDM.hpp
+++ b/cppcache/src/ThinClientPoolStickyDM.hpp
@@ -32,26 +32,23 @@ class ThinClientPoolStickyDM : public ThinClientPoolDM {
       : ThinClientPoolDM(name, poolAttrs, connManager) {
     m_sticky = true;
   }
-  virtual ~ThinClientPoolStickyDM() {
-    // m_manager->closeAllStickyConnections();
-    // delete m_manager; m_manager = nullptr;
-  }
-  virtual bool canItBeDeletedNoImpl(TcrConnection* conn);
+  ~ThinClientPoolStickyDM() override {}
+
+  bool canItBeDeletedNoImpl(TcrConnection* conn) override;
 
  protected:
-  virtual void cleanStickyConnections(volatile bool& isRunning);
-  virtual TcrConnection* getConnectionFromQueueW(
+  void cleanStickyConnections(std::atomic<bool>& isRunning) override;
+  TcrConnection* getConnectionFromQueueW(
       GfErrType* error, std::set<ServerLocation>&, bool isBGThread,
       TcrMessage& request, int8_t& version, bool& match, bool& connFound,
-      const std::shared_ptr<BucketServerLocation>& serverLocation = nullptr);
-  virtual void putInQueue(TcrConnection* conn, bool isBGThread,
-                          bool isTransaction = false);
-  virtual void setStickyNull(bool isBGThread);
-  virtual bool canItBeDeleted(TcrConnection* conn);
-  virtual void releaseThreadLocalConnection();
-  virtual void setThreadLocalConnection(TcrConnection* conn);
-
-  // virtual void cleanStickyConnections(volatile bool& isRunning);
+      const std::shared_ptr<BucketServerLocation>& serverLocation =
+          nullptr) override;
+  void putInQueue(TcrConnection* conn, bool isBGThread,
+                  bool isTransaction = false) override;
+  void setStickyNull(bool isBGThread) override;
+  bool canItBeDeleted(TcrConnection* conn) override;
+  void releaseThreadLocalConnection() override;
+  void setThreadLocalConnection(TcrConnection* conn) override;
 };
 }  // namespace client
 }  // namespace geode
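
The blanket switch from virtual to override in these headers is what makes parameter migrations like volatile bool& to std::atomic<bool>& safe: a stale signature becomes a compile error instead of a silently unrelated virtual. A small sketch with hypothetical types:

    #include <atomic>

    struct PoolBase {
      virtual ~PoolBase() = default;
      virtual void cleanStickyConnections(std::atomic<bool>& isRunning) = 0;
    };

    struct StickyPool : PoolBase {
      // Had this kept the old 'volatile bool&' parameter, 'override' would
      // reject it at compile time rather than quietly declaring a new,
      // never-called virtual beside the base one.
      void cleanStickyConnections(std::atomic<bool>& isRunning) override {
        if (!isRunning) {
          return;
        }
        // ... release sticky connections ...
      }
    };
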
diff --git a/cppcache/src/ThinClientRedundancyManager.cpp b/cppcache/src/ThinClientRedundancyManager.cpp
index 8830762..75fe357 100644
--- a/cppcache/src/ThinClientRedundancyManager.cpp
+++ b/cppcache/src/ThinClientRedundancyManager.cpp
@@ -724,7 +724,7 @@ void ThinClientRedundancyManager::close() {
     m_periodicAckTask->stopNoblock();
     m_periodicAckSema.release();
     m_periodicAckTask->wait();
-    _GEODE_SAFE_DELETE(m_periodicAckTask);
+    m_periodicAckTask = nullptr;
   }
 
   std::lock_guard<decltype(m_redundantEndpointsLock)> guard(
@@ -983,8 +983,9 @@ GfErrType ThinClientRedundancyManager::sendSyncRequestRegisterInterest(
   }
 }
 
-void ThinClientRedundancyManager::getAllEndpoints(
-    std::vector<TcrEndpoint*>& endpoints) {
+synchronized_map<std::unordered_map<std::string, TcrEndpoint*>,
+                 std::recursive_mutex>&
+ThinClientRedundancyManager::updateAndSelectEndpoints() {
   // 38196 Fix: For durable clients reconnect
   // 1. Get list of endpoints which have HA queue.
   // 2. Get HA endpoint with max queuesize;
@@ -997,33 +998,35 @@ void ThinClientRedundancyManager::getAllEndpoints(
   // Exception: For R =0 ( or when no EP with Max queuesize ),
   //  Old primary would be considered as new. Hence it would be at the end
 
-  ACE_Map_Manager<std::string, TcrEndpoint*, ACE_Recursive_Thread_Mutex>*
-      tempContainer;
   if (m_poolHADM) {
-    tempContainer = &m_poolHADM->m_endpoints;
     // fetch queue servers
     // send queue servers for sorting
     std::set<ServerLocation> exclEndPts;
     std::list<ServerLocation> outEndpoints;
 
     outEndpoints = selectServers(-1, exclEndPts);
-    for (std::list<ServerLocation>::iterator it = outEndpoints.begin();
-         it != outEndpoints.end(); it++) {
-      m_poolHADM->addEP(*it);
+    for (auto& it : outEndpoints) {
+      m_poolHADM->addEP(it);
     }
+
+    return m_poolHADM->m_endpoints;
   } else {
-    tempContainer = &m_theTcrConnManager->m_endpoints;
+    return m_theTcrConnManager->m_endpoints;
   }
+}
 
+void ThinClientRedundancyManager::getAllEndpoints(
+    std::vector<TcrEndpoint*>& endpoints) {
   TcrEndpoint* maxQEp = nullptr;
   TcrEndpoint* primaryEp = nullptr;
 
-  for (auto& currItr : *tempContainer) {
+  auto& selectedEndpoints = updateAndSelectEndpoints();
+  for (const auto& currItr : selectedEndpoints) {
     if (isDurable()) {
-      auto ep = currItr.int_id_;
+      auto ep = currItr.second;
       int32_t queueSize = 0;
       TcrConnection* statusConn = nullptr;
-      ServerQueueStatus status =
+      auto status =
           ep->getFreshServerQueueStatus(queueSize, !m_poolHADM, statusConn);
       if (m_poolHADM && status != NON_REDUNDANT_SERVER) {
         m_poolHADM->addConnection(statusConn);
@@ -1048,13 +1051,13 @@ void ThinClientRedundancyManager::getAllEndpoints(
             "ThinClientRedundancyManager::getAllEndpoints(): sorting "
             "endpoints, found primary endpoint.");
       } else {
-        endpoints.push_back(currItr.int_id_);
+        endpoints.push_back(currItr.second);
         LOGDEBUG(
             "ThinClientRedundancyManager::getAllEndpoints(): sorting "
             "endpoints, found nonredundant endpoint.");
       }
     } else {
-      endpoints.push_back(currItr.int_id_);
+      endpoints.push_back(currItr.second);
     }
     //(*currItr)++;
   }
@@ -1160,7 +1163,7 @@ int ThinClientRedundancyManager::processEventIdMap(const ACE_Time_Value&,
   return 0;
 }
 
-int ThinClientRedundancyManager::periodicAck(volatile bool& isRunning) {
+void ThinClientRedundancyManager::periodicAck(std::atomic<bool>& isRunning) {
   while (isRunning) {
     m_periodicAckSema.acquire();
     if (isRunning) {
@@ -1170,7 +1173,6 @@ int ThinClientRedundancyManager::periodicAck(volatile bool& isRunning) {
       }
     }
   }
-  return 0;
 }
 
 void ThinClientRedundancyManager::doPeriodicAck() {
@@ -1239,8 +1241,9 @@ void ThinClientRedundancyManager::doPeriodicAck() {
 }
 
 void ThinClientRedundancyManager::startPeriodicAck() {
-  m_periodicAckTask = new Task<ThinClientRedundancyManager>(
-      this, &ThinClientRedundancyManager::periodicAck, NC_PerodicACK);
+  m_periodicAckTask = std::unique_ptr<Task<ThinClientRedundancyManager>>(
+      new Task<ThinClientRedundancyManager>(
+          this, &ThinClientRedundancyManager::periodicAck, NC_PerodicACK));
   m_periodicAckTask->start();
   const auto& props = m_theTcrConnManager->getCacheImpl()
                           ->getDistributedSystem()
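
close() now stops the periodic-ack task by signaling the semaphore and waiting, then simply resets the std::unique_ptr instead of calling _GEODE_SAFE_DELETE. A rough standard-library analogue of that stop protocol, using a condition variable where the original keeps ACE_Semaphore (all names hypothetical):

    #include <condition_variable>
    #include <mutex>
    #include <thread>

    class PeriodicAck {
      std::mutex mutex_;
      std::condition_variable cv_;
      bool signaled_{false};
      bool running_{true};
      std::thread thread_;

      void loop() {
        std::unique_lock<std::mutex> lock(mutex_);
        while (running_) {
          cv_.wait(lock, [this] { return signaled_ || !running_; });
          signaled_ = false;
          if (running_) {
            // ... send batched acknowledgements ...
          }
        }
      }

     public:
      PeriodicAck() : thread_(&PeriodicAck::loop, this) {}

      void trigger() {  // analogous to m_periodicAckSema.release()
        {
          std::lock_guard<std::mutex> lock(mutex_);
          signaled_ = true;
        }
        cv_.notify_one();
      }

      ~PeriodicAck() {  // stop, signal, join: the shape of close() above
        {
          std::lock_guard<std::mutex> lock(mutex_);
          running_ = false;
        }
        cv_.notify_one();
        thread_.join();
      }
    };
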
diff --git a/cppcache/src/ThinClientRedundancyManager.hpp b/cppcache/src/ThinClientRedundancyManager.hpp
index 39e1bce..5794e50 100644
--- a/cppcache/src/ThinClientRedundancyManager.hpp
+++ b/cppcache/src/ThinClientRedundancyManager.hpp
@@ -20,17 +20,23 @@
 #ifndef GEODE_THINCLIENTREDUNDANCYMANAGER_H_
 #define GEODE_THINCLIENTREDUNDANCYMANAGER_H_
 
+#include <atomic>
 #include <chrono>
 #include <list>
+#include <memory>
 #include <mutex>
 #include <set>
 #include <string>
 
+#include <ace/ACE.h>
+
+#include "ErrType.hpp"
 #include "EventIdMap.hpp"
 #include "ExpiryTaskManager.hpp"
 #include "ServerLocation.hpp"
-#include "TcrEndpoint.hpp"
+#include "Task.hpp"
 #include "TcrMessage.hpp"
+#include "util/synchronized_map.hpp"
 
 namespace apache {
 namespace geode {
@@ -40,6 +46,7 @@ class TcrConnectionManager;
 class TcrHADistributionManager;
 class ThinClientRegion;
 class ThinClientPoolHADM;
+class TcrEndpoint;
 
 class ThinClientRedundancyManager {
  public:
@@ -113,6 +120,10 @@ class ThinClientRedundancyManager {
   void moveEndpointToLast(std::vector<TcrEndpoint*>& epVector,
                           TcrEndpoint* targetEp);
 
+  synchronized_map<std::unordered_map<std::string, TcrEndpoint*>,
+                   std::recursive_mutex>&
+  updateAndSelectEndpoints();
+
   void getAllEndpoints(std::vector<TcrEndpoint*>& endpoints);
   // For 38196 Fix: Reorder End points.
   void insertEPInQueueSizeOrder(TcrEndpoint* ep,
@@ -125,12 +136,12 @@ class ThinClientRedundancyManager {
 
   inline bool isDurable();
   int processEventIdMap(const ACE_Time_Value&, const void*);
-  Task<ThinClientRedundancyManager>* m_periodicAckTask;
+  std::unique_ptr<Task<ThinClientRedundancyManager>> m_periodicAckTask;
   ACE_Semaphore m_periodicAckSema;
   ExpiryTaskManager::id_type
       m_processEventIdMapTaskId;  // periodic check eventid map for notify ack
                                   // and/or expiry
-  int periodicAck(volatile bool& isRunning);
+  void periodicAck(std::atomic<bool>& isRunning);
   void doPeriodicAck();
   time_point m_nextAck;                    // next ack time
   std::chrono::milliseconds m_nextAckInc;  // next ack time increment
diff --git a/cppcache/src/ThinClientRegion.cpp b/cppcache/src/ThinClientRegion.cpp
index 6d10ce9..ef82072 100644
--- a/cppcache/src/ThinClientRegion.cpp
+++ b/cppcache/src/ThinClientRegion.cpp
@@ -34,6 +34,7 @@
 #include "ReadWriteLock.hpp"
 #include "RegionGlobalLocks.hpp"
 #include "RemoteQuery.hpp"
+#include "TcrConnectionManager.hpp"
 #include "TcrDistributionManager.hpp"
 #include "TcrEndpoint.hpp"
 #include "ThinClientBaseDM.hpp"
@@ -347,7 +348,6 @@ ThinClientRegion::ThinClientRegion(
     : LocalRegion(name, cacheImpl, rPtr, attributes, stats, shared),
       m_tcrdm(nullptr),
       m_notifyRelease(false),
-      m_notificationSema(1),
       m_isMetaDataRefreshed(false) {
   m_transactionEnabled = true;
   m_isDurableClnt = !cacheImpl->getDistributedSystem()
@@ -1318,9 +1318,8 @@ GfErrType ThinClientRegion::singleHopPutAllNoThrow_remote(
    * method.
    *  e. insert the worker into the vector.
    */
-  std::vector<PutAllWork*> putAllWorkers;
-  auto threadPool =
-      CacheRegionHelper::getCacheImpl(&getCache())->getThreadPool();
+  std::vector<std::shared_ptr<PutAllWork>> putAllWorkers;
+  auto& threadPool = m_cacheImpl->getThreadPool();
   int locationMapIndex = 0;
   for (const auto& locationIter : *locationMap) {
     const auto& serverLocation = locationIter.first;
@@ -1340,10 +1339,10 @@ GfErrType ThinClientRegion::singleHopPutAllNoThrow_remote(
       }
     }
 
-    auto worker = new PutAllWork(tcrdm, serverLocation, region,
-                                 true /*attemptFailover*/, false /*isBGThread*/,
-                                 filteredMap, keys, timeout, aCallbackArgument);
-    threadPool->perform(worker);
+    auto worker = std::make_shared<PutAllWork>(
+        tcrdm, serverLocation, region, true /*attemptFailover*/,
+        false /*isBGThread*/, filteredMap, keys, timeout, aCallbackArgument);
+    threadPool.perform(worker);
     putAllWorkers.push_back(worker);
     locationMapIndex++;
   }
@@ -1407,7 +1406,6 @@ GfErrType ThinClientRegion::singleHopPutAllNoThrow_remote(
     }
     */
 
-    delete worker;
     cnt++;
   }
   /**
@@ -1698,9 +1696,8 @@ GfErrType ThinClientRegion::singleHopRemoveAllNoThrow_remote(
    * method.
    *  e. insert the worker into the vector.
    */
-  std::vector<RemoveAllWork*> removeAllWorkers;
-  auto* threadPool =
-      CacheRegionHelper::getCacheImpl(&getCache())->getThreadPool();
+  std::vector<std::shared_ptr<RemoveAllWork>> removeAllWorkers;
+  auto& threadPool = m_cacheImpl->getThreadPool();
   int locationMapIndex = 0;
   for (const auto& locationIter : *locationMap) {
     const auto& serverLocation = locationIter.first;
@@ -1708,10 +1705,10 @@ GfErrType ThinClientRegion::singleHopRemoveAllNoThrow_remote(
       LOGDEBUG("serverLocation is nullptr");
     }
     const auto& mappedkeys = locationIter.second;
-    auto worker = new RemoveAllWork(
+    auto worker = std::make_shared<RemoveAllWork>(
         tcrdm, serverLocation, region, true /*attemptFailover*/,
         false /*isBGThread*/, mappedkeys, aCallbackArgument);
-    threadPool->perform(worker);
+    threadPool.perform(worker);
     removeAllWorkers.push_back(worker);
     locationMapIndex++;
   }
@@ -1762,7 +1759,6 @@ GfErrType ThinClientRegion::singleHopRemoveAllNoThrow_remote(
         "worker->getResultCollector()->getList()->getVersionedTagsize() = %d ",
         worker->getResultCollector()->getList()->getVersionedTagsize());
 
-    delete worker;
     cnt++;
   }
   /**
@@ -2797,6 +2793,7 @@ GfErrType ThinClientRegion::handleServerException(const char* func,
 }
 
 void ThinClientRegion::receiveNotification(TcrMessage* msg) {
+  std::unique_lock<std::mutex> lock(m_notificationMutex, std::defer_lock);
   {
     TryReadGuard guard(m_rwLock, m_destroyPending);
     if (m_destroyPending) {
@@ -2805,7 +2802,7 @@ void ThinClientRegion::receiveNotification(TcrMessage* msg) {
       }
       return;
     }
-    m_notificationSema.acquire();
+    lock.lock();
   }
 
   if (msg->getMessageType() == TcrMessage::CLIENT_MARKER) {
@@ -2814,7 +2811,7 @@ void ThinClientRegion::receiveNotification(TcrMessage* msg) {
     clientNotificationHandler(*msg);
   }
 
-  m_notificationSema.release();
+  lock.unlock();
   if (TcrMessage::getAllEPDisMess() != msg) _GEODE_SAFE_DELETE(msg);
 }
 
@@ -2917,8 +2914,10 @@ void ThinClientRegion::release(bool invokeCallbacks) {
   if (m_released) {
     return;
   }
+
+  std::unique_lock<std::mutex> lock(m_notificationMutex, std::defer_lock);
   if (!m_notifyRelease) {
-    m_notificationSema.acquire();
+    lock.lock();
   }
 
   destroyDM(invokeCallbacks);
@@ -3147,7 +3146,7 @@ bool ThinClientRegion::executeFunctionSH(
   auto resultCollectorLock = std::make_shared<std::recursive_mutex>();
   const auto& userAttr = UserAttributes::threadLocalUserAttributes;
   std::vector<std::shared_ptr<OnRegionFunctionExecution>> feWorkers;
-  auto* threadPool =
+  auto& threadPool =
       CacheRegionHelper::getCacheImpl(&getCache())->getThreadPool();
 
   for (const auto& locationIter : *locationMap) {
@@ -3157,14 +3156,13 @@ bool ThinClientRegion::executeFunctionSH(
         func, this, args, routingObj, getResult, timeout,
         dynamic_cast<ThinClientPoolDM*>(m_tcrdm), resultCollectorLock, rc,
         userAttr, false, serverLocation, allBuckets);
-    threadPool->perform(worker.get());
+    threadPool.perform(worker);
     feWorkers.push_back(worker);
   }
 
   GfErrType abortError = GF_NOERR;
 
-  for (auto iter = std::begin(feWorkers); iter != std::end(feWorkers);) {
-    auto worker = *iter;
+  for (auto worker : feWorkers) {
     auto err = worker->getResult();
     auto currentReply = worker->getReply();
 
@@ -3230,8 +3228,6 @@ bool ThinClientRegion::executeFunctionSH(
         }
       }
     }
-
-    iter = feWorkers.erase(iter);
   }
 
   if (abortError != GF_NOERR) {
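
receiveNotification() and release() replace the binary ACE_Semaphore with a plain std::mutex held through a deferred std::unique_lock: the guard is declared up front, locked only on the paths that need it, and released automatically on every exit. A condensed sketch of the pattern, with hypothetical stand-in names:

    #include <mutex>

    std::mutex notificationMutex;
    bool destroyPending = false;

    void receiveNotification() {
      std::unique_lock<std::mutex> lock(notificationMutex, std::defer_lock);
      {
        // ... acquire the region's read guard, then decide ...
        if (destroyPending) {
          return;  // lock was never taken, so nothing leaks
        }
        lock.lock();  // taken only on the live path
      }
      // ... dispatch the notification under the lock ...
      lock.unlock();  // explicit early release before trailing cleanup
    }
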
diff --git a/cppcache/src/ThinClientRegion.hpp b/cppcache/src/ThinClientRegion.hpp
index 2f00550..a7ec893 100644
--- a/cppcache/src/ThinClientRegion.hpp
+++ b/cppcache/src/ThinClientRegion.hpp
@@ -24,7 +24,6 @@
 #include <unordered_map>
 
 #include <ace/RW_Thread_Mutex.h>
-#include <ace/Semaphore.h>
 #include <ace/Task.h>
 
 #include <geode/ResultCollector.hpp>
@@ -36,7 +35,6 @@
 #include "Queue.hpp"
 #include "RegionGlobalLocks.hpp"
 #include "TcrChunkedContext.hpp"
-#include "TcrEndpoint.hpp"
 #include "TcrMessage.hpp"
 
 namespace apache {
@@ -44,6 +42,7 @@ namespace geode {
 namespace client {
 
 class ThinClientBaseDM;
+class TcrEndpoint;
 
 /**
  * @class ThinClientRegion ThinClientRegion.hpp
@@ -303,7 +302,7 @@ class APACHE_GEODE_EXPORT ThinClientRegion : public LocalRegion {
       m_durableInterestListRegexForUpdatesAsInvalidates;
 
   bool m_notifyRelease;
-  ACE_Semaphore m_notificationSema;
+  std::mutex m_notificationMutex;
 
   bool m_isDurableClnt;
 
diff --git a/cppcache/src/ThinClientStickyManager.hpp b/cppcache/src/ThinClientStickyManager.hpp
index ec02b8a..dbb2fa7 100644
--- a/cppcache/src/ThinClientStickyManager.hpp
+++ b/cppcache/src/ThinClientStickyManager.hpp
@@ -25,6 +25,7 @@
 #include <set>
 #include <vector>
 
+#include "ErrType.hpp"
 #include "TssConnectionWrapper.hpp"
 
 namespace apache {
diff --git a/cppcache/src/ThreadPool.cpp b/cppcache/src/ThreadPool.cpp
index 9921f7d..b74bf24 100644
--- a/cppcache/src/ThreadPool.cpp
+++ b/cppcache/src/ThreadPool.cpp
@@ -17,131 +17,65 @@
 
 #include "ThreadPool.hpp"
 
-#include <geode/SystemProperties.hpp>
-
-#include "CacheImpl.hpp"
-#include "DistributedSystem.hpp"
 #include "DistributedSystemImpl.hpp"
 
 namespace apache {
 namespace geode {
 namespace client {
 
-ThreadPoolWorker::ThreadPoolWorker(IThreadPool* manager)
-    : manager_(manager), queue_(msg_queue()), shutdown_(0) {
-#if defined(_MACOSX)
-  threadId_ = nullptr;
-#else
-  threadId_ = 0;
-#endif
-}
-
-ThreadPoolWorker::~ThreadPoolWorker() { shutDown(); }
-
-int ThreadPoolWorker::perform(ACE_Method_Request* req) {
-  ACE_TRACE(ACE_TEXT("Worker::perform"));
-  return queue_.enqueue(req);
-}
-
-int ThreadPoolWorker::svc(void) {
-  threadId_ = ACE_Thread::self();
-  while (1) {
-    ACE_Method_Request* request = queue_.dequeue();
-    if (request == nullptr) {
-      shutDown();
-      break;
-    }
-
-    // Invoke the request
-    request->call();
+const char* ThreadPool::NC_Pool_Thread = "NC Pool Thread";
 
-    // Return to work.
-    manager_->returnToWork(this);
+ThreadPool::ThreadPool(size_t threadPoolSize) : shutdown_(false) {
+  workers_.reserve(threadPoolSize);
+  for (size_t i = 0; i < threadPoolSize; i++) {
+    workers_.emplace_back([this] {
+      DistributedSystemImpl::setThreadName(NC_Pool_Thread);
+      while (true) {
+        std::unique_lock<decltype(queueMutex_)> lock(queueMutex_);
+        queueCondition_.wait(lock,
+                             [this] { return shutdown_ || !queue_.empty(); });
+
+        if (shutdown_) {
+          break;
+        }
+
+        auto work = queue_.front();
+        queue_.pop_front();
+
+        lock.unlock();
+
+        try {
+          work->call();
+        } catch (...) {
+          // ignore
+        }
+      }
+    });
   }
-  return 0;
-}
-
-int ThreadPoolWorker::shutDown(void) {
-  if (shutdown_ != 1) {
-    queue_.queue()->close();
-    wait();
-    shutdown_ = 1;
-  }
-
-  return shutdown_;
-}
-
-ACE_thread_t ThreadPoolWorker::threadId(void) { return threadId_; }
-
-ThreadPool::ThreadPool(uint32_t threadPoolSize)
-    : poolSize_(threadPoolSize), shutdown_(0) {
-  activate();
 }
 
 ThreadPool::~ThreadPool() { shutDown(); }
 
-int ThreadPool::perform(ACE_Method_Request* req) { return queue_.enqueue(req); }
-
-const char* ThreadPool::NC_Pool_Thread = "NC Pool Thread";
-int ThreadPool::svc(void) {
-  DistributedSystemImpl::setThreadName(NC_Pool_Thread);
-  // Create pool when you get in the first time.
-  createWorkerPool();
-  while (!done()) {
-    // Get the next message
-    ACE_Method_Request* request = queue_.dequeue();
-    if (request == nullptr) {
-      shutDown();
-      break;
-    }
-    // Choose a worker.
-    auto worker = chooseWorker();
-    // Ask the worker to do the job.
-    worker->perform(request);
-  }
-  return 0;
-}
-
-int ThreadPool::shutDown(void) {
-  if (shutdown_ != 1) {
-    queue_.queue()->close();
-    wait();
-    shutdown_ = 1;
-  }
-
-  return shutdown_;
-}
+void ThreadPool::perform(std::shared_ptr<Callable> req) {
+  std::unique_lock<decltype(queueMutex_)> lock(queueMutex_);
+  auto wasEmpty = queue_.empty();
+  queue_.push_back(std::move(req));
+  lock.unlock();
 
-int ThreadPool::returnToWork(ThreadPoolWorker* worker) {
-  std::unique_lock<decltype(workersLock_)> lock(workersLock_);
-  workers_.push_back(worker);
-  workersCond_.notify_one();
-  return 0;
-}
-
-ThreadPoolWorker* ThreadPool::chooseWorker(void) {
-  std::unique_lock<decltype(workersLock_)> lock(workersLock_);
-  if (workers_.empty()) {
-    workersCond_.wait(lock, [this] { return !workers_.empty(); });
+  if (wasEmpty) {
+    queueCondition_.notify_all();
   }
-  auto worker = workers_.front();
-  workers_.pop_front();
-  return worker;
 }
 
-int ThreadPool::createWorkerPool(void) {
-  std::unique_lock<decltype(workersLock_)> lock(workersLock_);
-  for (int i = 0; i < poolSize_; i++) {
-    ThreadPoolWorker* worker;
-    ACE_NEW_RETURN(worker, ThreadPoolWorker(this), -1);
-    workers_.push_back(worker);
-    worker->activate();
+void ThreadPool::shutDown(void) {
+  if (!shutdown_.exchange(true)) {
+    queueCondition_.notify_all();
+    for (auto& worker : workers_) {
+      worker.join();
+    }
   }
-  return 0;
 }
 
-int ThreadPool::done(void) { return (shutdown_ == 1); }
-
 }  // namespace client
 }  // namespace geode
 }  // namespace apache
diff --git a/cppcache/src/ThreadPool.hpp b/cppcache/src/ThreadPool.hpp
index 73ddfc5..b59fe46 100644
--- a/cppcache/src/ThreadPool.hpp
+++ b/cppcache/src/ThreadPool.hpp
@@ -20,22 +20,25 @@
 #ifndef GEODE_THREADPOOL_H_
 #define GEODE_THREADPOOL_H_
 
+#include <atomic>
 #include <condition_variable>
 #include <deque>
 #include <mutex>
-
-#include <ace/Activation_Queue.h>
-#include <ace/Method_Request.h>
-#include <ace/Recursive_Thread_Mutex.h>
-#include <ace/Singleton.h>
-#include <ace/Task.h>
+#include <thread>
+#include <vector>
 
 namespace apache {
 namespace geode {
 namespace client {
 
+class Callable {
+ public:
+  virtual ~Callable() noexcept = default;
+  virtual void call() = 0;
+};
+
 template <class T>
-class PooledWork : public ACE_Method_Request {
+class PooledWork : public Callable {
  private:
   T m_retVal;
   std::recursive_mutex m_mutex;
@@ -45,9 +48,9 @@ class PooledWork : public ACE_Method_Request {
  public:
   PooledWork() : m_mutex(), m_cond(), m_done(false) {}
 
-  virtual ~PooledWork() {}
+  ~PooledWork() override {}
 
-  virtual int call(void) {
+  void call() override {
     T res = execute();
 
     std::lock_guard<decltype(m_mutex)> lock(m_mutex);
@@ -55,8 +58,6 @@ class PooledWork : public ACE_Method_Request {
     m_retVal = res;
     m_done = true;
     m_cond.notify_all();
-
-    return 0;
   }
 
   T getResult(void) {
@@ -73,54 +74,21 @@ class PooledWork : public ACE_Method_Request {
   virtual T execute(void) = 0;
 };
 
-class ThreadPoolWorker;
-
-class IThreadPool {
- public:
-  virtual int returnToWork(ThreadPoolWorker* worker) = 0;
-  virtual ~IThreadPool() {}
-};
-
-class ThreadPoolWorker : public ACE_Task<ACE_MT_SYNCH> {
+class ThreadPool {
  public:
-  explicit ThreadPoolWorker(IThreadPool* manager);
-  virtual ~ThreadPoolWorker();
-  int perform(ACE_Method_Request* req);
-  int shutDown(void);
-
-  virtual int svc(void);
-  ACE_thread_t threadId(void);
-
- private:
-  IThreadPool* manager_;
-  ACE_thread_t threadId_;
-  ACE_Activation_Queue queue_;
-  int shutdown_;
-};
-
-class ThreadPool : public ACE_Task_Base, IThreadPool {
-  friend class ACE_Singleton<ThreadPool, ACE_Recursive_Thread_Mutex>;
+  explicit ThreadPool(size_t threadPoolSize);
+  ~ThreadPool();
 
- public:
-  explicit ThreadPool(uint32_t threadPoolSize);
-  virtual ~ThreadPool();
-  int perform(ACE_Method_Request* req);
-  int svc(void);
-  int shutDown(void);
-  virtual int returnToWork(ThreadPoolWorker* worker);
+  void perform(std::shared_ptr<Callable> req);
 
- private:
-  ThreadPoolWorker* chooseWorker(void);
-  int createWorkerPool(void);
-  int done(void);
+  void shutDown(void);
 
  private:
-  int poolSize_;
-  int shutdown_;
-  std::mutex workersLock_;
-  std::condition_variable workersCond_;
-  std::deque<ThreadPoolWorker*> workers_;
-  ACE_Activation_Queue queue_;
+  std::atomic<bool> shutdown_;
+  std::vector<std::thread> workers_;
+  std::deque<std::shared_ptr<Callable>> queue_;
+  std::mutex queueMutex_;
+  std::condition_variable queueCondition_;
   static const char* NC_Pool_Thread;
 };
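
With ACE_Method_Request gone, work items derive from the new Callable interface, are shared via std::shared_ptr, and block for completion through PooledWork<T>::getResult(); shared ownership is what removed the manual delete-worker calls in the put-all and remove-all paths above. A usage sketch with a hypothetical work type:

    #include <iostream>
    #include <memory>

    #include "ThreadPool.hpp"

    class SquareWork : public PooledWork<int> {
      int value_;

      // Runs on a pool thread; call() stores the result and wakes waiters.
      int execute() override { return value_ * value_; }

     public:
      explicit SquareWork(int value) : value_(value) {}
    };

    int main() {
      ThreadPool pool(4);
      auto work = std::make_shared<SquareWork>(7);
      pool.perform(work);                       // shared_ptr keeps work alive
      std::cout << work->getResult() << "\n";   // blocks until done: 49
    }
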
 
diff --git a/cppcache/src/TssConnectionWrapper.hpp b/cppcache/src/TssConnectionWrapper.hpp
index 8e01590..ee958b7 100644
--- a/cppcache/src/TssConnectionWrapper.hpp
+++ b/cppcache/src/TssConnectionWrapper.hpp
@@ -1,8 +1,3 @@
-#pragma once
-
-#ifndef GEODE_TSSCONNECTIONWRAPPER_H_
-#define GEODE_TSSCONNECTIONWRAPPER_H_
-
 /*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
@@ -19,17 +14,26 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
+#pragma once
+
+#ifndef GEODE_TSSCONNECTIONWRAPPER_H_
+#define GEODE_TSSCONNECTIONWRAPPER_H_
+
 #include <map>
 #include <string>
 
-#include <geode/Pool.hpp>
+#include <ace/TSS_T.h>
 
-#include "TcrEndpoint.hpp"
+#include <geode/Pool.hpp>
 
 namespace apache {
 namespace geode {
 namespace client {
+
+class TcrEndpoint;
 class TcrConnection;
+
 typedef std::map<std::string, TcrConnection*> EpNameVsConnection;
 
 class PoolWrapper {
@@ -74,6 +78,7 @@ class TssConnectionWrapper {
   void releaseSHConnections(std::shared_ptr<Pool> p);
   TcrConnection* getAnyConnection(const char* poolname);
 };
+
 }  // namespace client
 }  // namespace geode
 }  // namespace apache
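
This header now forward-declares TcrEndpoint and TcrConnection instead of including TcrEndpoint.hpp, part of the commit's include cleanup for faster compiles. The general shape, with a hypothetical holder type:

    // In the header: a forward declaration suffices for pointer and
    // reference members, so includers no longer pay for the full definition.
    class TcrConnection;

    class ConnectionHolder {
      TcrConnection* connection_;  // incomplete type is fine here

     public:
      TcrConnection* get() const { return connection_; }
    };

    // In the source file only:
    // #include "TcrConnection.hpp"  // complete type where members are used
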
diff --git a/cppcache/src/config.h.in b/cppcache/src/config.h.in
index 7444cc3..c2a283f 100644
--- a/cppcache/src/config.h.in
+++ b/cppcache/src/config.h.in
@@ -22,6 +22,13 @@
 
 #cmakedefine HAVE_SYS_MOUNT_H
 
+#cmakedefine CMAKE_USE_PTHREADS_INIT
+#if defined(CMAKE_USE_PTHREADS_INIT)
+#cmakedefine HAVE_PTHREAD_H
+#cmakedefine HAVE_PTHREAD_HX
+#cmakedefine HAVE_pthread_setname_np
+#endif
+
 #cmakedefine HAVE_SIGSTKFLT
 #cmakedefine HAVE_ACE_Select_Reactor
 
diff --git a/cppcache/src/statistics/GeodeStatisticsFactory.cpp b/cppcache/src/statistics/GeodeStatisticsFactory.cpp
index 90a2e57..834d61d 100644
--- a/cppcache/src/statistics/GeodeStatisticsFactory.cpp
+++ b/cppcache/src/statistics/GeodeStatisticsFactory.cpp
@@ -27,6 +27,7 @@
 #include "../util/Log.hpp"
 #include "AtomicStatisticsImpl.hpp"
 #include "OsStatisticsImpl.hpp"
+#include "StatisticDescriptorImpl.hpp"
 
 namespace apache {
 namespace geode {
@@ -50,15 +51,14 @@ GeodeStatisticsFactory::~GeodeStatisticsFactory() {
     m_statMngr = nullptr;
 
     // Clean Map : Delete all the pointers of StatisticsType from the map.
-    if (statsTypeMap.total_size() == 0) return;
+    std::lock_guard<decltype(statsTypeMap)::mutex_type> lock(
+        statsTypeMap.mutex());
+    if (statsTypeMap.empty()) return;
 
-    auto iterFind = statsTypeMap.begin();
-    while (iterFind != statsTypeMap.end()) {
-      delete (*iterFind).int_id_;
-      (*iterFind).int_id_ = nullptr;
-      iterFind++;
+    for (auto& entry : statsTypeMap) {
+      delete entry.second;
     }
-    statsTypeMap.unbind_all();
+    statsTypeMap.clear();
 
   } catch (const Exception& ex) {
     Log::warningCatch("~GeodeStatisticsFactory swallowing Geode exception", ex);
@@ -152,20 +152,17 @@ Statistics* GeodeStatisticsFactory::findFirstStatisticsByType(
 
 StatisticsTypeImpl* GeodeStatisticsFactory::addType(StatisticsTypeImpl* st) {
   const auto& name = st->getName();
-  int status;
   try {
-    StatisticsTypeImpl* st1;
-    status = statsTypeMap.rebind(name, st, st1);
+    auto status = statsTypeMap.emplace(name, st);
+    if (!status.second) {
+      throw IllegalArgumentException(
+          "GeodeStatisticsFactory::addType: failed to add new type " + name);
+    }
   } catch (const std::exception& ex) {
     throw IllegalArgumentException(ex.what());
   } catch (...) {
     throw IllegalArgumentException("addType: unknown exception");
   }
-  if (status == 1) {
-  } else if (status == -1) {
-    throw IllegalArgumentException(
-        "GeodeStatisticsFactory::addType: failed to add new type " + name);
-  }
   return st;
 }
 
@@ -175,8 +172,7 @@ StatisticsTypeImpl* GeodeStatisticsFactory::addType(StatisticsTypeImpl* st) {
 StatisticsType* GeodeStatisticsFactory::createType(
     const std::string& name, const std::string& description,
     StatisticDescriptor** stats, int32_t statsLength) {
-  StatisticsTypeImpl* st =
-      new StatisticsTypeImpl(name, description, stats, statsLength);
+  auto st = new StatisticsTypeImpl(name, description, stats, statsLength);
 
   if (st != nullptr) {
     st = addType(st);
@@ -189,15 +185,13 @@ StatisticsType* GeodeStatisticsFactory::createType(
 
 StatisticsType* GeodeStatisticsFactory::findType(
     const std::string& name) const {
-  StatisticsTypeImpl* st = nullptr;
-  int status = statsTypeMap.find(name, st);
-  if (status == -1) {
+  auto&& lock = statsTypeMap.make_lock();
+  const auto& entry = statsTypeMap.find(name);
+  if (entry == statsTypeMap.end()) {
     std::string s = "There is no statistic named \"" + name + "\"";
-    // LOGWARN(s.c_str());
-    // throw IllegalArgumentException(s.c_str());
     return nullptr;
   } else {
-    return st;
+    return entry->second;
   }
 }
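
addType() now keys off std::unordered_map::emplace, whose pair<iterator, bool> result replaces the -1/0/1 status codes of ACE_Map_Manager::rebind. Note the semantic difference: rebind replaced an existing entry, while emplace leaves it untouched and reports false. A small standalone illustration:

    #include <iostream>
    #include <string>
    #include <unordered_map>

    int main() {
      std::unordered_map<std::string, int> types;

      auto status = types.emplace("StatSampler", 1);
      std::cout << std::boolalpha << status.second << "\n";  // true: inserted

      status = types.emplace("StatSampler", 2);
      std::cout << status.second << "\n";         // false: key already present
      std::cout << status.first->second << "\n";  // 1: original value retained
    }
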
 
diff --git a/cppcache/src/statistics/GeodeStatisticsFactory.hpp b/cppcache/src/statistics/GeodeStatisticsFactory.hpp
index fde07ce..4d6ed46 100644
--- a/cppcache/src/statistics/GeodeStatisticsFactory.hpp
+++ b/cppcache/src/statistics/GeodeStatisticsFactory.hpp
@@ -21,14 +21,13 @@
 #define GEODE_STATISTICS_GEODESTATISTICSFACTORY_H_
 
 #include <mutex>
+#include <unordered_map>
 #include <vector>
 
-#include <ace/Map_Manager.h>
-#include <ace/Recursive_Thread_Mutex.h>
-
 #include <geode/ExceptionTypes.hpp>
 #include <geode/internal/geode_globals.hpp>
 
+#include "../util/synchronized_map.hpp"
 #include "StatisticsFactory.hpp"
 #include "StatisticsManager.hpp"
 #include "StatisticsTypeImpl.hpp"
@@ -60,7 +59,9 @@ class GeodeStatisticsFactory : public StatisticsFactory {
   std::recursive_mutex m_statsListUniqueIdLock;
 
   /* Maps a stat name to its StatisticDescriptor*/
-  ACE_Map_Manager<std::string, StatisticsTypeImpl*, ACE_Recursive_Thread_Mutex>
+  apache::geode::client::synchronized_map<
+      std::unordered_map<std::string, StatisticsTypeImpl*>,
+      std::recursive_mutex>
       statsTypeMap;
 
   StatisticsTypeImpl* addType(StatisticsTypeImpl* t);
diff --git a/cppcache/src/statistics/HostStatSampler.cpp b/cppcache/src/statistics/HostStatSampler.cpp
index 579c2a2..1fc5b8e 100644
--- a/cppcache/src/statistics/HostStatSampler.cpp
+++ b/cppcache/src/statistics/HostStatSampler.cpp
@@ -29,8 +29,6 @@
 #include <ace/INET_Addr.h>
 #include <ace/OS_NS_sys_stat.h>
 #include <ace/OS_NS_sys_utsname.h>
-#include <ace/Task.h>
-#include <ace/Thread_Mutex.h>
 #include <boost/process/environment.hpp>
 
 #include <geode/SystemProperties.hpp>
@@ -40,6 +38,7 @@
 #include "../ClientHealthStats.hpp"
 #include "../ClientProxyMembershipID.hpp"
 #include "../DistributedSystem.hpp"
+#include "../TcrConnectionManager.hpp"
 #include "../util/Log.hpp"
 #include "GeodeStatisticsFactory.hpp"
 #include "StatArchiveWriter.hpp"
@@ -480,15 +479,14 @@ void HostStatSampler::closeSpecialStats() {}
 void HostStatSampler::checkListeners() {}
 
 void HostStatSampler::start() {
-  if (!m_running) {
-    m_running = true;
-    this->activate();
+  if (!m_running.exchange(true)) {
+    m_thread = std::thread(&HostStatSampler::svc, this);
   }
 }
 
 void HostStatSampler::stop() {
   m_stopRequested = true;
-  this->wait();
+  m_thread.join();
 }
 
 bool HostStatSampler::isRunning() { return m_running; }
@@ -644,7 +642,7 @@ void HostStatSampler::checkDiskLimit() {
   }
 }
 
-int32_t HostStatSampler::svc(void) {
+void HostStatSampler::svc(void) {
   client::DistributedSystemImpl::setThreadName(NC_HSS_Thread);
   try {
     initSpecialStats();
@@ -716,7 +714,6 @@ int32_t HostStatSampler::svc(void) {
        closeSpecialStats();
    }*/
   m_running = false;
-  return 0;
 }
 }  // namespace statistics
 }  // namespace geode
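
start() now guards thread creation with an atomic exchange, so only the first caller that flips m_running launches the thread. A compact sketch of the pattern; the joinable() check in stop() is a defensive addition of this sketch (covering stop-before-start), not something the diff itself adds:

    #include <atomic>
    #include <thread>

    class Sampler {
      std::thread thread_;
      std::atomic<bool> running_{false};
      std::atomic<bool> stopRequested_{false};

      void svc() {
        while (!stopRequested_) {
          // ... take and archive one sample ...
        }
        running_ = false;
      }

     public:
      void start() {
        // exchange() returns the previous value, making start() idempotent.
        if (!running_.exchange(true)) {
          thread_ = std::thread(&Sampler::svc, this);
        }
      }

      void stop() {
        stopRequested_ = true;
        if (thread_.joinable()) {  // assumption: tolerate stop() before start()
          thread_.join();
        }
      }
    };
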
diff --git a/cppcache/src/statistics/HostStatSampler.hpp b/cppcache/src/statistics/HostStatSampler.hpp
index 1a1ec2e..3091e91 100644
--- a/cppcache/src/statistics/HostStatSampler.hpp
+++ b/cppcache/src/statistics/HostStatSampler.hpp
@@ -20,10 +20,12 @@
 #ifndef GEODE_STATISTICS_HOSTSTATSAMPLER_H_
 #define GEODE_STATISTICS_HOSTSTATSAMPLER_H_
 
+#include <atomic>
 #include <chrono>
 #include <memory>
 #include <mutex>
 #include <string>
+#include <thread>
 #include <vector>
 
 #include <ace/Task.h>
@@ -63,9 +65,7 @@ class StatisticsManager;
  * HostStatSampler implements a thread which will monitor, sample and archive
  * statistics. It only has the common functionalities which any sampler needs.
  */
-class APACHE_GEODE_EXPORT HostStatSampler : public ACE_Task_Base,
-                                            private NonCopyable,
-                                            private NonAssignable {
+class HostStatSampler : private NonCopyable, private NonAssignable {
  public:
   /*
    * Constructor:
@@ -179,7 +179,7 @@ class APACHE_GEODE_EXPORT HostStatSampler : public ACE_Task_Base,
   /**
    * The function executed by the thread
    */
-  int32_t svc(void);
+  void svc(void);
 
   /**
    * Method to know whether the sampling thread is running or not.
@@ -191,10 +191,10 @@ class APACHE_GEODE_EXPORT HostStatSampler : public ACE_Task_Base,
  private:
   std::recursive_mutex m_samplingLock;
   bool m_adminError;
-  // Related to ACE Thread.
-  bool m_running;
-  bool m_stopRequested;
-  volatile bool m_isStatDiskSpaceEnabled;
+  std::thread m_thread;
+  std::atomic<bool> m_running;
+  std::atomic<bool> m_stopRequested;
+  std::atomic<bool> m_isStatDiskSpaceEnabled;
   std::unique_ptr<StatArchiveWriter> m_archiver;
   StatSamplerStats* m_samplerStats;
   const char* m_durableClientId;
diff --git a/cppcache/src/statistics/PoolStatsSampler.cpp b/cppcache/src/statistics/PoolStatsSampler.cpp
index c6b868f..b7d13fe 100644
--- a/cppcache/src/statistics/PoolStatsSampler.cpp
+++ b/cppcache/src/statistics/PoolStatsSampler.cpp
@@ -17,9 +17,7 @@
 
 #include "PoolStatsSampler.hpp"
 
-#include <chrono>
 #include <string>
-#include <thread>
 
 #include "../CacheImpl.hpp"
 #include "../ClientHealthStats.hpp"
@@ -40,18 +38,16 @@ const char* PoolStatsSampler::NC_PSS_Thread = "NC PSS Thread";
 
 PoolStatsSampler::PoolStatsSampler(milliseconds sampleRate, CacheImpl* cache,
                                    ThinClientPoolDM* distMan)
-    : m_sampleRate(sampleRate),
+    : m_running(false),
+      m_stopRequested(false),
+      m_sampleRate(sampleRate),
+      m_adminRegion(AdminRegion::create(cache, distMan)),
       m_distMan(distMan),
       m_statisticsFactory(
-          cache->getStatisticsManager().getStatisticsFactory()) {
-  m_running = false;
-  m_stopRequested = false;
-  m_adminRegion = AdminRegion::create(cache, distMan);
-}
+          cache->getStatisticsManager().getStatisticsFactory()) {}
 
-int32_t PoolStatsSampler::svc() {
+void PoolStatsSampler::svc() {
   client::DistributedSystemImpl::setThreadName(NC_PSS_Thread);
-  // ACE_Guard < ACE_Recursive_Thread_Mutex > _guard( m_lock );
   while (!m_stopRequested) {
     auto sampleStart = high_resolution_clock::now();
     putStatsInAdminRegion();
@@ -65,21 +61,19 @@ int32_t PoolStatsSampler::svc() {
       sleepDuration -= wakeInterval;
     }
   }
-  return 0;
 }
 
 void PoolStatsSampler::start() {
-  if (!m_running) {
-    m_running = true;
-    this->activate();
+  if (!m_running.exchange(true)) {
+    m_thread = std::thread(&PoolStatsSampler::svc, this);
   }
 }
 
 void PoolStatsSampler::stop() {
-  // ACE_Guard < ACE_Recursive_Thread_Mutex > _guard( m_lock );
   m_stopRequested = true;
-  this->wait();
+  m_thread.join();
 }
+
 bool PoolStatsSampler::isRunning() { return m_running; }
 
 void PoolStatsSampler::putStatsInAdminRegion() {
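
The sampler's svc() loop sleeps the sample interval in small wakeInterval slices rather than one long sleep, so m_stopRequested is honored within a fraction of a second. A self-contained sketch of that idiom (names hypothetical):

    #include <algorithm>
    #include <atomic>
    #include <chrono>
    #include <thread>

    void sampleLoop(std::atomic<bool>& stopRequested) {
      using namespace std::chrono;
      const auto sampleRate = milliseconds(1000);
      const auto wakeInterval = milliseconds(100);

      while (!stopRequested) {
        const auto sampleStart = steady_clock::now();
        // ... collect and publish one sample ...
        auto sleepDuration = sampleRate - duration_cast<milliseconds>(
                                              steady_clock::now() - sampleStart);
        while (!stopRequested && sleepDuration > milliseconds::zero()) {
          std::this_thread::sleep_for(std::min(wakeInterval, sleepDuration));
          sleepDuration -= wakeInterval;
        }
      }
    }
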
diff --git a/cppcache/src/statistics/PoolStatsSampler.hpp b/cppcache/src/statistics/PoolStatsSampler.hpp
index 067723f..687c47d 100644
--- a/cppcache/src/statistics/PoolStatsSampler.hpp
+++ b/cppcache/src/statistics/PoolStatsSampler.hpp
@@ -20,16 +20,13 @@
 #ifndef GEODE_STATISTICS_POOLSTATSSAMPLER_H_
 #define GEODE_STATISTICS_POOLSTATSSAMPLER_H_
 
+#include <atomic>
 #include <chrono>
 #include <memory>
-#include <mutex>
-
-#include <ace/Task.h>
+#include <thread>
 
 #include <geode/internal/geode_globals.hpp>
 
-#include "GeodeStatisticsFactory.hpp"
-
 namespace apache {
 namespace geode {
 
@@ -49,29 +46,30 @@ using client::CacheImpl;
 using client::ThinClientPoolDM;
 
 class StatisticsManager;
+class GeodeStatisticsFactory;
 
-class APACHE_GEODE_EXPORT PoolStatsSampler : public ACE_Task_Base {
+class PoolStatsSampler {
  public:
   PoolStatsSampler() = delete;
   PoolStatsSampler(std::chrono::milliseconds sampleRate, CacheImpl* cache,
                    ThinClientPoolDM* distMan);
   PoolStatsSampler& operator=(const PoolStatsSampler&) = delete;
   PoolStatsSampler(const PoolStatsSampler& PoolStatsSampler) = delete;
-  ~PoolStatsSampler() noexcept override {}
+  ~PoolStatsSampler() noexcept {}
 
   void start();
   void stop();
-  int32_t svc(void) override;
+  void svc(void);
   bool isRunning();
 
  private:
   void putStatsInAdminRegion();
-  volatile bool m_running;
-  volatile bool m_stopRequested;
+  std::thread m_thread;
+  std::atomic<bool> m_running;
+  std::atomic<bool> m_stopRequested;
   std::chrono::milliseconds m_sampleRate;
   std::shared_ptr<AdminRegion> m_adminRegion;
   ThinClientPoolDM* m_distMan;
-  std::recursive_mutex m_lock;
   static const char* NC_PSS_Thread;
   GeodeStatisticsFactory* m_statisticsFactory;
 };
diff --git a/cppcache/src/statistics/StatArchiveWriter.cpp b/cppcache/src/statistics/StatArchiveWriter.cpp
index 0dbea36..a216c06 100644
--- a/cppcache/src/statistics/StatArchiveWriter.cpp
+++ b/cppcache/src/statistics/StatArchiveWriter.cpp
@@ -32,6 +32,7 @@
 #include "../CacheImpl.hpp"
 #include "../util/chrono/time_point.hpp"
 #include "GeodeStatisticsFactory.hpp"
+#include "HostStatSampler.hpp"
 
 namespace apache {
 namespace geode {
diff --git a/cppcache/src/statistics/StatArchiveWriter.hpp b/cppcache/src/statistics/StatArchiveWriter.hpp
index 3055fb0..4f8c2a3 100644
--- a/cppcache/src/statistics/StatArchiveWriter.hpp
+++ b/cppcache/src/statistics/StatArchiveWriter.hpp
@@ -32,7 +32,6 @@
 #include "../NonCopyable.hpp"
 #include "../SerializationRegistry.hpp"
 #include "../util/Log.hpp"
-#include "HostStatSampler.hpp"
 #include "StatisticDescriptor.hpp"
 #include "StatisticDescriptorImpl.hpp"
 #include "Statistics.hpp"
@@ -65,6 +64,8 @@ namespace apache {
 namespace geode {
 namespace statistics {
 
+class HostStatSampler;
+
 using apache::geode::client::CacheImpl;
 using apache::geode::client::DataOutput;
 using apache::geode::client::NonAssignable;
diff --git a/cppcache/src/statistics/StatisticsManager.cpp b/cppcache/src/statistics/StatisticsManager.cpp
index ea2d0cb..1f07ed1 100644
--- a/cppcache/src/statistics/StatisticsManager.cpp
+++ b/cppcache/src/statistics/StatisticsManager.cpp
@@ -25,6 +25,7 @@
 #include "../util/Log.hpp"
 #include "AtomicStatisticsImpl.hpp"
 #include "GeodeStatisticsFactory.hpp"
+#include "HostStatSampler.hpp"
 #include "OsStatisticsImpl.hpp"
 
 namespace apache {
@@ -46,13 +47,14 @@ StatisticsManager::StatisticsManager(
       std::unique_ptr<GeodeStatisticsFactory>(new GeodeStatisticsFactory(this));
 
   try {
-    if (m_sampler == nullptr && enabled) {
-      m_sampler = new HostStatSampler(filePath, m_sampleIntervalMs, this, cache,
-                                      statFileLimit, statDiskSpaceLimit);
+    if (enabled) {
+      m_sampler = std::unique_ptr<HostStatSampler>(
+          new HostStatSampler(filePath, m_sampleIntervalMs, this, cache,
+                              statFileLimit, statDiskSpaceLimit));
       m_sampler->start();
     }
   } catch (...) {
-    delete m_sampler;
+    m_sampler = nullptr;
     throw;
   }
 }
@@ -109,9 +111,8 @@ std::recursive_mutex& StatisticsManager::getListMutex() {
 }
 
 void StatisticsManager::closeSampler() {
-  if (m_sampler != nullptr) {
+  if (m_sampler) {
     m_sampler->stop();
-    delete m_sampler;
     m_sampler = nullptr;
   }
 }
diff --git a/cppcache/src/statistics/StatisticsManager.hpp b/cppcache/src/statistics/StatisticsManager.hpp
index f4c99c2..b91968d 100644
--- a/cppcache/src/statistics/StatisticsManager.hpp
+++ b/cppcache/src/statistics/StatisticsManager.hpp
@@ -29,7 +29,6 @@
 
 #include "../AdminRegion.hpp"
 #include "GeodeStatisticsFactory.hpp"
-#include "HostStatSampler.hpp"
 #include "Statistics.hpp"
 #include "StatisticsTypeImpl.hpp"
 
@@ -40,6 +39,7 @@ namespace statistics {
 using apache::geode::client::AdminRegion;
 
 class GeodeStatisticsFactory;
+class HostStatSampler;
 
 /**
  * Head Application Manager for Statistics Module.
@@ -51,7 +51,7 @@ class StatisticsManager {
   std::chrono::milliseconds m_sampleIntervalMs;
 
   // Statistics sampler
-  HostStatSampler* m_sampler;
+  std::unique_ptr<HostStatSampler> m_sampler;
 
   // Vector containing all the Stats objects
   std::vector<Statistics*> m_statsList;
diff --git a/cppcache/src/NonCopyable.hpp b/cppcache/src/util/queue.hpp
similarity index 58%
copy from cppcache/src/NonCopyable.hpp
copy to cppcache/src/util/queue.hpp
index 3bb3063..3e0092a 100644
--- a/cppcache/src/NonCopyable.hpp
+++ b/cppcache/src/util/queue.hpp
@@ -17,35 +17,38 @@
 
 #pragma once
 
-#ifndef GEODE_NONCOPYABLE_H_
-#define GEODE_NONCOPYABLE_H_
-
-#include <geode/internal/geode_base.hpp>
+#ifndef NATIVECLIENT_UTIL_QUEUE_H
+#define NATIVECLIENT_UTIL_QUEUE_H
 
 namespace apache {
 namespace geode {
 namespace client {
+namespace queue {
 
-class APACHE_GEODE_EXPORT NonCopyable {
- protected:
-  NonCopyable() {}
-  ~NonCopyable() {}
-
- private:
-  NonCopyable(const NonCopyable&);
-};
-
-class APACHE_GEODE_EXPORT NonAssignable {
- protected:
-  NonAssignable() {}
-  ~NonAssignable() {}
-
- private:
-  const NonAssignable& operator=(const NonAssignable&);
-};
-
+/**
+ * Coalesces the front of a queue-like structure against the given value:
+ * removes consecutive entries equal to value from the front of the queue,
+ * stopping at the first entry that differs.
+ *
+ * @tparam Queue queue-like type to coalesce
+ * @tparam Type type of the value compared against
+ * @param queue queue to coalesce
+ * @param value value whose leading duplicates are removed
+ */
+template <class Queue, class Type>
+inline void coalesce(Queue& queue, const Type& value) {
+  while (!queue.empty()) {
+    const auto& next = queue.front();
+    if (next == value) {
+      queue.pop_front();
+    } else {
+      break;
+    }
+  }
+}
+
+}  // namespace queue
 }  // namespace client
 }  // namespace geode
 }  // namespace apache
 
-#endif  // GEODE_NONCOPYABLE_H_
+#endif  // NATIVECLIENT_UTIL_QUEUE_H
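
The coalesce helper factored out of the old bounded-queue code trims only the leading run of equal values; anything past the first differing element survives. For instance:

    #include <cassert>
    #include <deque>

    #include "util/queue.hpp"

    int main() {
      using apache::geode::client::queue::coalesce;

      std::deque<int> events{7, 7, 7, 3, 7};
      coalesce(events, 7);         // strips only the leading run of 7s
      assert(events.size() == 2);  // left with {3, 7}

      coalesce(events, 9);         // front is 3: no-op
      assert(events.front() == 3);
    }
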
diff --git a/cppcache/src/util/string.hpp b/cppcache/src/util/string.hpp
index e05e263..f0b8d12 100644
--- a/cppcache/src/util/string.hpp
+++ b/cppcache/src/util/string.hpp
@@ -42,7 +42,7 @@ inline std::u16string to_utf16(const std::string& utf8) {
 #if defined(_MSC_VER) && _MSC_VER >= 1900
   /*
   * Workaround for missing std::codecvt identifier.
-   * https://connect.microsoft.com/VisualStudio/feedback/details/1403302
+   * https://stackoverflow.com/questions/32055357/visual-studio-c-2015-stdcodecvt-with-char16-t-or-char32-t
    */
   auto int16String =
       std::wstring_convert<std::codecvt_utf8_utf16<int16_t>, int16_t>{}
@@ -59,7 +59,7 @@ inline std::u16string to_utf16(const std::u32string& ucs4) {
 #if defined(_MSC_VER) && _MSC_VER >= 1900
   /*
   * Workaround for missing std::codecvt identifier.
-   * https://connect.microsoft.com/VisualStudio/feedback/details/1403302
+   * https://stackoverflow.com/questions/32055357/visual-studio-c-2015-stdcodecvt-with-char16-t-or-char32-t
    */
   auto data = reinterpret_cast<const int32_t*>(ucs4.data());
   auto bytes =
@@ -83,7 +83,7 @@ inline std::u16string to_utf16(const char32_t* ucs4, size_t len) {
 #if defined(_MSC_VER) && _MSC_VER >= 1900
   /*
   * Workaround for missing std::codecvt identifier.
-   * https://connect.microsoft.com/VisualStudio/feedback/details/1403302
+   * https://stackoverflow.com/questions/32055357/visual-studio-c-2015-stdcodecvt-with-char16-t-or-char32-t
    */
   auto data = reinterpret_cast<const int32_t*>(ucs4);
   auto bytes =
@@ -107,7 +107,7 @@ inline std::u32string to_ucs4(const std::u16string& utf16) {
 #if defined(_MSC_VER) && _MSC_VER >= 1900
   /*
   * Workaround for missing std::codecvt identifier.
-   * https://connect.microsoft.com/VisualStudio/feedback/details/1403302
+   * https://stackoverflow.com/questions/32055357/visual-studio-c-2015-stdcodecvt-with-char16-t-or-char32-t
    */
   auto data = reinterpret_cast<const char*>(utf16.data());
   auto tmp =
@@ -130,7 +130,7 @@ inline std::string to_utf8(const std::u16string& utf16) {
 #if defined(_MSC_VER) && _MSC_VER >= 1900
   /*
   * Workaround for missing std::codecvt identifier.
-   * https://connect.microsoft.com/VisualStudio/feedback/details/1403302
+   * https://stackoverflow.com/questions/32055357/visual-studio-c-2015-stdcodecvt-with-char16-t-or-char32-t
    */
   auto data = reinterpret_cast<const int16_t*>(utf16.data());
   return std::wstring_convert<std::codecvt_utf8_utf16<int16_t>, int16_t>{}
@@ -145,7 +145,7 @@ inline std::string to_utf8(const std::u32string& ucs4) {
 #if defined(_MSC_VER) && _MSC_VER >= 1900
   /*
   * Workaround for missing std::codecvt identifier.
-   * https://connect.microsoft.com/VisualStudio/feedback/details/1403302
+   * https://stackoverflow.com/questions/32055357/visual-studio-c-2015-stdcodecvt-with-char16-t-or-char32-t
    */
   auto data = reinterpret_cast<const int32_t*>(ucs4.data());
   return std::wstring_convert<std::codecvt_utf8<int32_t>, int32_t>{}.to_bytes(
diff --git a/cppcache/src/util/synchronized_map.hpp b/cppcache/src/util/synchronized_map.hpp
new file mode 100644
index 0000000..c3677e9
--- /dev/null
+++ b/cppcache/src/util/synchronized_map.hpp
@@ -0,0 +1,166 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#ifndef GEODE_UTIL_SYNCHRONIZED_MAP_H_
+#define GEODE_UTIL_SYNCHRONIZED_MAP_H_
+
+#include <mutex>
+#include <utility>
+
+namespace apache {
+namespace geode {
+namespace client {
+
+/**
+ * Wrapper around std::map, std::unordered_map and other map like
+ * implementations.
+ *
+ * This is a very incomplete implementation. Add methods as needed.
+ *
+ * @tparam Map type to wrap.
+ * @tparam Mutex type to synchronize with. Defaults to std::recursive_mutex
+ */
+template <class Map, class Mutex = std::recursive_mutex>
+class synchronized_map {
+ private:
+  Map map_;
+  mutable Mutex mutex_;
+
+ public:
+  typedef Mutex mutex_type;
+  typedef Map map_type;
+  typedef typename Map::key_type key_type;
+  typedef typename Map::mapped_type mapped_type;
+  typedef typename Map::allocator_type allocator_type;
+  typedef typename Map::value_type value_type;
+  typedef typename Map::reference reference;
+  typedef typename Map::const_reference const_reference;
+  typedef typename Map::iterator iterator;
+  typedef typename Map::const_iterator const_iterator;
+  typedef typename Map::difference_type difference_type;
+  typedef typename Map::size_type size_type;
+
+  inline mutex_type& mutex() const noexcept { return mutex_; }
+
+  inline map_type& map() noexcept { return map_; }
+  inline const map_type& map() const noexcept { return map_; }
+
+  /**
+   * Allocates a Lock object around the Mutex and locks the Mutex.
+   *
+   * Example:
+   * \code{.cpp}
+   * auto&& guard = exampleMap.make_lock();
+   * \endcode
+   *
+   * Equivalent to:
+   * \code{.cpp}
+   * std::lock_guard<decltype(exampleMap)::mutex_type> guard(exampleMap.mutex());
+   * \endcode
+   *
+   * @tparam Lock type to allocate. Defaults to std::lock_guard.
+   * @return allocated Lock object with lock taken.
+   * @throws Any exception thrown by Mutex::lock()
+   */
+  template <template <class...> class Lock = std::lock_guard>
+  inline Lock<Mutex> make_lock() const {
+    mutex_.lock();
+    return {mutex_, std::adopt_lock};
+  }
+
+  /**
+   * Allocates a Lock object around the Mutex passing any args to the Lock
+   * constructor.
+   *
+   * Example:
+   * \code{.cpp}
+   * auto&& guard = exampleMap.make_lock<std::unique_lock>(std::defer_lock);
+   * \endcode
+   *
+   * Equivalent to:
+   * \code{.cpp}
+   * std::unique_lock<decltype(exampleMap)::mutex_type> guard(
+   *     exampleMap.mutex(), std::defer_lock);
+   * \endcode
+   *
+   * @tparam Lock type to allocate. Defaults to std::lock_guard.
+   * @tparam Args types passed to Lock constructor.
+   * @param args values passed to Lock constructor.
+   * @return allocated Lock object.
+   * @throws Any exception thrown by the Lock constructor.
+   */
+  template <template <class...> class Lock = std::lock_guard, class... Args>
+  inline Lock<Mutex> make_lock(Args&&... args) const {
+    return {mutex_, std::forward<Args>(args)...};
+  }
+
+  template <class... Args>
+  inline std::pair<typename Map::iterator, bool> emplace(Args&&... args) {
+    std::lock_guard<Mutex> lock(mutex_);
+    return map_.emplace(std::forward<Args>(args)...);
+  }
+
+  inline size_type erase(const key_type& key) {
+    std::lock_guard<Mutex> lock(mutex_);
+    return map_.erase(key);
+  }
+
+  inline bool empty() const noexcept {
+    std::lock_guard<Mutex> lock(mutex_);
+    return map_.empty();
+  }
+
+  inline size_type size() const noexcept {
+    std::lock_guard<Mutex> lock(mutex_);
+    return map_.size();
+  }
+
+  inline void clear() noexcept {
+    std::lock_guard<Mutex> lock(mutex_);
+    map_.clear();
+  }
+
+  inline iterator find(const key_type& key) {
+    // Not synchronized here: the returned iterator is only safe to use
+    // while the caller holds the mutex, e.g. via make_lock().
+    return map_.find(key);
+  }
+
+  inline const_iterator find(const key_type& key) const {
+    // Not synchronized here; see the non-const find() above.
+    return map_.find(key);
+  }
+
+  inline iterator begin() noexcept { return map_.begin(); }
+  inline iterator end() noexcept { return map_.end(); }
+  inline const_iterator begin() const noexcept { return map_.begin(); }
+  inline const_iterator end() const noexcept { return map_.end(); }
+  inline const_iterator cbegin() const noexcept { return map_.begin(); }
+  inline const_iterator cend() const noexcept { return map_.end(); }
+
+  template <class InputIterator>
+  inline void insert(InputIterator first, InputIterator last) {
+    std::lock_guard<Mutex> lock(mutex_);
+    map_.insert(first, last);
+  }
+};
+
+}  // namespace client
+}  // namespace geode
+}  // namespace apache
+
+#endif  // GEODE_UTIL_SYNCHRONIZED_MAP_H_
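
Mutating operations such as emplace() and erase() lock internally, but find() and iteration deliberately do not; callers bracket those with make_lock(), as ~GeodeStatisticsFactory() does above. A usage sketch:

    #include <iostream>
    #include <string>
    #include <unordered_map>

    #include "util/synchronized_map.hpp"

    using apache::geode::client::synchronized_map;

    int main() {
      synchronized_map<std::unordered_map<std::string, int>> endpoints;

      endpoints.emplace("host1:40404", 1);  // internally locked
      endpoints.emplace("host2:40404", 2);

      {
        auto&& guard = endpoints.make_lock();  // hold across the whole walk
        for (const auto& entry : endpoints) {
          std::cout << entry.first << " -> " << entry.second << "\n";
        }
      }
    }
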
diff --git a/cppcache/src/util/synchronized_set.hpp b/cppcache/src/util/synchronized_set.hpp
new file mode 100644
index 0000000..4029fb0
--- /dev/null
+++ b/cppcache/src/util/synchronized_set.hpp
@@ -0,0 +1,175 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#ifndef GEODE_UTIL_SYNCHRONIZED_SET_H_
+#define GEODE_UTIL_SYNCHRONIZED_SET_H_
+
+#include <mutex>
+#include <utility>
+
+namespace apache {
+namespace geode {
+namespace client {
+
+/**
+ * Wrapper around std::set, std::unordered_set, and other set-like
+ * implementations.
+ *
+ * This is a very incomplete implementation. Add methods as needed.
+ *
+ * @tparam Set type to wrap.
+ * @tparam Mutex type to synchronize with. Defaults to std::recursive_mutex
+ */
+template <class Set, class Mutex = std::recursive_mutex>
+class synchronized_set {
+ private:
+  Set set_;
+  mutable Mutex mutex_;
+
+ public:
+  typedef Mutex mutex_type;
+  typedef Set set_type;
+  typedef typename Set::key_type key_type;
+  typedef typename Set::allocator_type allocator_type;
+  typedef typename Set::value_type value_type;
+  typedef typename Set::reference reference;
+  typedef typename Set::const_reference const_reference;
+  typedef typename Set::iterator iterator;
+  typedef typename Set::const_iterator const_iterator;
+  typedef typename Set::difference_type difference_type;
+  typedef typename Set::size_type size_type;
+
+  inline mutex_type& mutex() const noexcept { return mutex_; }
+
+  inline set_type& set() noexcept { return set_; }
+  inline const set_type& set() const noexcept { return set_; }
+
+  /**
+   * Allocates a Lock object around the Mutex and locks the Mutex.
+   *
+   * Example:
+   * \code{.cpp}
+   * auto&& guard = exampleSet.make_lock();
+   * \endcode
+   *
+   * Equivalent to:
+   * \code{.cpp}
+   * std::lock_guard<decltype(exampleSet)::mutex_type> guard(exampleSet.mutex());
+   * \endcode
+   *
+   * @tparam Lock type to allocate. Defaults to std::lock_guard.
+   * @return allocated Lock object with lock taken.
+   * @throws Any exception thrown by Mutex::lock()
+   */
+  template <template <class...> class Lock = std::lock_guard>
+  inline Lock<Mutex> make_lock() const {
+    mutex_.lock();
+    return {mutex_, std::adopt_lock};
+  }
+
+  /**
+   * Allocates a Lock object around the Mutex passing any args to the Lock
+   * constructor.
+   *
+   * Example:
+   * \code{.cpp}
+   * auto&& guard = exampleSet.make_lock<std::unique_lock>(std::defer_lock);
+   * \endcode
+   *
+   * Equivalent to:
+   * \code{.cpp}
+   * std::unique_lock<decltype(exampleSet)::mutex_type> guard(
+   *     exampleSet.mutex(), std::defer_lock);
+   * \endcode
+   *
+   * @tparam Lock type to allocate. Defaults to std::lock_guard.
+   * @tparam Args types passed to Lock constructor.
+   * @param args values passed to Lock constructor.
+   * @return allocated Lock object.
+   * @throws Any exception thrown by the Lock constructor.
+   */
+  template <template <class...> class Lock = std::lock_guard, class... Args>
+  inline Lock<Mutex> make_lock(Args&&... args) const {
+    return {mutex_, std::forward<Args>(args)...};
+  }
+
+  template <class... Args>
+  inline std::pair<typename Set::iterator, bool> emplace(Args&&... args) {
+    std::lock_guard<Mutex> lock(mutex_);
+    return set_.emplace(std::forward<Args>(args)...);
+  }
+
+  inline size_type erase(const key_type& key) {
+    std::lock_guard<Mutex> lock(mutex_);
+    return set_.erase(key);
+  }
+
+  inline bool empty() const noexcept {
+    std::lock_guard<Mutex> lock(mutex_);
+    return set_.empty();
+  }
+
+  inline size_type size() const noexcept {
+    std::lock_guard<Mutex> lock(mutex_);
+    return set_.size();
+  }
+
+  inline void clear() noexcept {
+    std::lock_guard<Mutex> lock(mutex_);
+    set_.clear();
+  }
+
+  inline iterator find(const key_type& key) {
+    // Not synchronized here: the returned iterator is only safe to use
+    // while the caller holds the mutex, e.g. via make_lock().
+    return set_.find(key);
+  }
+
+  inline const_iterator find(const key_type& key) const {
+    // Not synchronized here; see the non-const find() above.
+    return set_.find(key);
+  }
+
+  inline iterator begin() noexcept { return set_.begin(); }
+  inline iterator end() noexcept { return set_.end(); }
+  inline const_iterator begin() const noexcept { return set_.begin(); }
+  inline const_iterator end() const noexcept { return set_.end(); }
+  inline const_iterator cbegin() const noexcept { return set_.begin(); }
+  inline const_iterator cend() const noexcept { return set_.end(); }
+
+  template <class InputIterator>
+  inline void insert(InputIterator first, InputIterator last) {
+    std::lock_guard<Mutex> lock(mutex_);
+    set_.insert(first, last);
+  }
+
+  inline std::pair<iterator, bool> insert(value_type&& value) {
+    std::lock_guard<Mutex> lock(mutex_);
+    return set_.insert(std::move(value));
+  }
+
+  inline std::pair<iterator, bool> insert(const value_type& value) {
+    std::lock_guard<Mutex> lock(mutex_);
+    return set_.insert(value);
+  }
+};
+
+}  // namespace client
+}  // namespace geode
+}  // namespace apache
+
+#endif  // GEODE_UTIL_SYNCHRONIZED_SET_H_
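
Taken together, synchronized_set supports two usage modes: mutators such as
insert(), emplace(), and erase() lock internally per call, while find() and
iteration require the caller to hold the mutex. A minimal sketch, assuming an
explicit std::mutex and illustrative names:

    #include <mutex>
    #include <string>
    #include <unordered_set>

    #include "util/synchronized_set.hpp"

    using apache::geode::client::synchronized_set;

    void example() {
      synchronized_set<std::unordered_set<std::string>, std::mutex> names;

      // Mutators take the mutex internally, one acquisition per call.
      names.insert("alpha");
      names.emplace("beta");

      // find() and iteration take no lock; hold the mutex explicitly.
      auto&& guard = names.make_lock();
      for (const auto& name : names) {
        (void)name;  // safe to read while `guard` is held
      }
    }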
diff --git a/cppcache/test/CMakeLists.txt b/cppcache/test/CMakeLists.txt
index d26e090..89ef0b9 100644
--- a/cppcache/test/CMakeLists.txt
+++ b/cppcache/test/CMakeLists.txt
@@ -40,9 +40,16 @@ add_executable(apache-geode_unittests
   RegionAttributesFactoryTest.cpp
   SerializableCreateTests.cpp
   StructSetTest.cpp
-  TcrMessage_unittest.cpp
-  CacheableDate.cpp
-)
+  TcrMessageTest.cpp
+  CacheableDateTest.cpp
+  util/synchronized_mapTest.cpp
+  util/synchronized_setTest.cpp
+  util/JavaModifiedUtf8Tests.cpp
+  util/functionalTests.cpp
+  util/chrono/durationTest.cpp
+  LocalRegionTest.cpp
+  util/queueTest.cpp
+  ThreadPoolTest.cpp
+)
 
 target_compile_definitions(apache-geode_unittests
   PUBLIC
@@ -59,6 +66,7 @@ target_link_libraries(apache-geode_unittests
     GTest::GTest
     GTest::Main
     Boost::boost
+    Boost::thread
     _WarningsAsError
     _CppCodeCoverage
 )
diff --git a/cppcache/test/CacheableDate.cpp b/cppcache/test/CacheableDateTest.cpp
similarity index 100%
rename from cppcache/test/CacheableDate.cpp
rename to cppcache/test/CacheableDateTest.cpp
diff --git a/cppcache/test/LocalRegionTest.cpp b/cppcache/test/LocalRegionTest.cpp
new file mode 100644
index 0000000..4e7765e
--- /dev/null
+++ b/cppcache/test/LocalRegionTest.cpp
@@ -0,0 +1,79 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+
+#include <geode/AuthenticatedView.hpp>
+#include <geode/Cache.hpp>
+#include <geode/PoolManager.hpp>
+#include <geode/RegionFactory.hpp>
+#include <geode/RegionShortcut.hpp>
+
+using apache::geode::client::CacheFactory;
+using apache::geode::client::RegionAttributesFactory;
+using apache::geode::client::RegionShortcut;
+
+/**
+ * Region should report its subregions correctly, both recursively and
+ * non-recursively.
+ */
+TEST(LocalRegionTest, subRegions) {
+  auto cache = CacheFactory{}.set("log-level", "none").create();
+
+  auto rootRegions = cache.rootRegions();
+  EXPECT_TRUE(rootRegions.empty());
+
+  auto rootRegion1 =
+      cache.createRegionFactory(RegionShortcut::LOCAL).create("rootRegion1");
+  EXPECT_NE(nullptr, rootRegion1);
+
+  auto subRegion11 = rootRegion1->createSubregion(
+      "subRegion11", RegionAttributesFactory().create());
+  EXPECT_NE(nullptr, subRegion11);
+
+  auto subRegion12 = rootRegion1->createSubregion(
+      "subRegion12", RegionAttributesFactory().create());
+  EXPECT_NE(nullptr, subRegion12);
+
+  auto subRegions1 = rootRegion1->subregions(true);
+  EXPECT_EQ(2, subRegions1.size());
+
+  auto rootRegion2 =
+      cache.createRegionFactory(RegionShortcut::LOCAL).create("rootRegion2");
+  EXPECT_NE(nullptr, rootRegion2);
+
+  auto subRegion21 = rootRegion2->createSubregion(
+      "subRegion21", RegionAttributesFactory().create());
+  EXPECT_NE(nullptr, subRegion21);
+
+  auto subRegion211 = subRegion21->createSubregion(
+      "subRegion211", RegionAttributesFactory().create());
+  EXPECT_NE(nullptr, subRegion211);
+
+  auto subRegions2 = rootRegion2->subregions(true);
+  EXPECT_EQ(2, subRegions2.size());
+
+  auto subRegions2NonRecursive = rootRegion2->subregions(false);
+  EXPECT_EQ(1, subRegions2NonRecursive.size());
+
+  auto rootRegion3 =
+      cache.createRegionFactory(RegionShortcut::LOCAL).create("rootRegion3");
+  EXPECT_NE(nullptr, rootRegion3);
+
+  auto subRegions3 = rootRegion3->subregions(true);
+  EXPECT_EQ(0, subRegions3.size());
+}
diff --git a/cppcache/test/TcrMessage_unittest.cpp b/cppcache/test/TcrMessageTest.cpp
similarity index 100%
rename from cppcache/test/TcrMessage_unittest.cpp
rename to cppcache/test/TcrMessageTest.cpp
diff --git a/cppcache/test/util/functionalTests.cpp b/cppcache/test/ThreadPoolTest.cpp
similarity index 51%
copy from cppcache/test/util/functionalTests.cpp
copy to cppcache/test/ThreadPoolTest.cpp
index 410ccbc..f62bf61 100644
--- a/cppcache/test/util/functionalTests.cpp
+++ b/cppcache/test/ThreadPoolTest.cpp
@@ -15,21 +15,42 @@
  * limitations under the License.
  */
 
-#include <string>
+#include <condition_variable>
+#include <mutex>
 
 #include <gtest/gtest.h>
 
-#include <geode/internal/functional.hpp>
+#include "ThreadPool.hpp"
 
-using namespace apache::geode::client::internal;
+using apache::geode::client::Callable;
+using apache::geode::client::ThreadPool;
 
-TEST(string, geode_hash) {
-  auto&& hash = geode_hash<std::string>{};
+class TestCallable : public Callable {
+ public:
+  TestCallable() : called_(0) {}
 
-  EXPECT_EQ(0, hash(""));
-  EXPECT_EQ(97, hash("a"));
-  EXPECT_EQ(122, hash("z"));
-  EXPECT_EQ(48, hash("0"));
-  EXPECT_EQ(57, hash("9"));
-  EXPECT_EQ(1077910243, hash("supercalifragilisticexpialidocious"));
+  void call() {
+    std::lock_guard<decltype(mutex_)> lock(mutex_);
+    called_++;
+    condition_.notify_all();
+  }
+
+  size_t called_;
+  std::mutex mutex_;
+  std::condition_variable condition_;
+};
+
+TEST(ThreadPoolTest, callableIsCalled) {
+  ThreadPool threadPool(1);
+
+  auto c = std::make_shared<TestCallable>();
+  threadPool.perform(c);
+  std::unique_lock<decltype(c->mutex_)> lock(c->mutex_);
+  c->condition_.wait(lock, [&] { return c->called_ > 0; });
+
+  ASSERT_EQ(1, c->called_);
 }
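
The predicate overload of condition_variable::wait() used above is what makes
the handoff safe against spurious wakeups and against the notification racing
ahead of the wait; it is specified to behave like this explicit loop:

    std::unique_lock<std::mutex> lock(c->mutex_);
    while (!(c->called_ > 0)) {
      c->condition_.wait(lock);  // re-checks the predicate on every wakeup
    }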
diff --git a/cppcache/test/util/JavaModifiedUtf8Tests.cpp b/cppcache/test/util/JavaModifiedUtf8Tests.cpp
index 9f37ccc..7f6bda0 100644
--- a/cppcache/test/util/JavaModifiedUtf8Tests.cpp
+++ b/cppcache/test/util/JavaModifiedUtf8Tests.cpp
@@ -16,14 +16,14 @@
  */
 
 #include <string>
+#include <util/JavaModifiedUtf8.hpp>
 
 #include <gtest/gtest.h>
 
-#include <util/JavaModifiedUtf8.hpp>
-
 #include "../ByteArray.hpp"
 
-using namespace apache::geode::client::internal;
+using apache::geode::client::ByteArray;
+using apache::geode::client::internal::JavaModifiedUtf8;
 
 TEST(JavaModifiedUtf8Tests, EncodedLengthFromUtf8) {
   EXPECT_EQ(27, JavaModifiedUtf8::encodedLength("You had me at meat tornado!"));
diff --git a/cppcache/test/util/TestableRecursiveMutex.hpp b/cppcache/test/util/TestableRecursiveMutex.hpp
new file mode 100644
index 0000000..8db755c
--- /dev/null
+++ b/cppcache/test/util/TestableRecursiveMutex.hpp
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <atomic>
+#include <mutex>
+
+class TestableRecursiveMutex {
+ public:
+  std::recursive_mutex mutex_;
+  std::atomic<int32_t> recursive_depth_;
+  std::atomic<int32_t> lock_count_;
+  std::atomic<int32_t> unlock_count_;
+  std::atomic<int32_t> try_lock_count_;
+
+  TestableRecursiveMutex() noexcept
+      : recursive_depth_(0),
+        lock_count_(0),
+        unlock_count_(0),
+        try_lock_count_(0) {}
+
+  void lock() {
+    mutex_.lock();
+    ++recursive_depth_;
+    ++lock_count_;
+  }
+
+  void unlock() {
+    mutex_.unlock();
+    --recursive_depth_;
+    ++unlock_count_;
+  }
+
+  bool try_lock() {
+    bool locked = false;
+    if ((locked = mutex_.try_lock())) {
+      ++recursive_depth_;
+    }
+
+    ++try_lock_count_;
+    return locked;
+  }
+
+  void resetCounters() {
+    recursive_depth_ = 0;
+    lock_count_ = 0;
+    unlock_count_ = 0;
+    try_lock_count_ = 0;
+  }
+};
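
Because it provides lock(), unlock(), and try_lock(), TestableRecursiveMutex
satisfies the standard Lockable requirements, so the wrappers under test can
use it anywhere they would use std::recursive_mutex while its counters record
what happened. For instance:

    TestableRecursiveMutex mutex;
    {
      std::lock_guard<TestableRecursiveMutex> guard(mutex);
      // recursive_depth_ == 1 and lock_count_ == 1 at this point.
    }
    // guard destroyed: recursive_depth_ == 0, unlock_count_ == 1.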
diff --git a/cppcache/test/util/chrono/durationTest.cpp b/cppcache/test/util/chrono/durationTest.cpp
index 262d212..b436bf7 100644
--- a/cppcache/test/util/chrono/durationTest.cpp
+++ b/cppcache/test/util/chrono/durationTest.cpp
@@ -15,8 +15,8 @@
  * limitations under the License.
  */
 
-#include <chrono>
 #include <algorithm>
+#include <chrono>
 
 #include <gtest/gtest.h>
 
@@ -24,8 +24,10 @@
 
 #include "util/chrono/duration_bounds.hpp"
 
-using namespace apache::geode::internal::chrono::duration;
-using namespace apache::geode::util::chrono::duration;
+using apache::geode::internal::chrono::duration::_ceil;
+using apache::geode::internal::chrono::duration::from_string;
+using apache::geode::internal::chrono::duration::to_string;
+using apache::geode::util::chrono::duration::assert_bounds;
 
 TEST(util_chrono_durationTest, ceil) {
   EXPECT_EQ(std::chrono::seconds(1),
diff --git a/cppcache/test/util/functionalTests.cpp b/cppcache/test/util/functionalTests.cpp
index 410ccbc..f4e0426 100644
--- a/cppcache/test/util/functionalTests.cpp
+++ b/cppcache/test/util/functionalTests.cpp
@@ -21,7 +21,7 @@
 
 #include <geode/internal/functional.hpp>
 
-using namespace apache::geode::client::internal;
+using apache::geode::client::internal::geode_hash;
 
 TEST(string, geode_hash) {
   auto&& hash = geode_hash<std::string>{};
diff --git a/cppcache/test/util/functionalTests.cpp b/cppcache/test/util/queueTest.cpp
similarity index 68%
copy from cppcache/test/util/functionalTests.cpp
copy to cppcache/test/util/queueTest.cpp
index 410ccbc..b5d6861 100644
--- a/cppcache/test/util/functionalTests.cpp
+++ b/cppcache/test/util/queueTest.cpp
@@ -15,21 +15,22 @@
  * limitations under the License.
  */
 
-#include <string>
+#include <deque>
 
 #include <gtest/gtest.h>
 
-#include <geode/internal/functional.hpp>
+#include "util/queue.hpp"
 
-using namespace apache::geode::client::internal;
+using apache::geode::client::queue::coalesce;
 
-TEST(string, geode_hash) {
-  auto&& hash = geode_hash<std::string>{};
+TEST(util_queueTest, coalesce) {
+  auto queue = std::deque<int32_t>({1, 1, 1, 2, 3, 4});
 
-  EXPECT_EQ(0, hash(""));
-  EXPECT_EQ(97, hash("a"));
-  EXPECT_EQ(122, hash("z"));
-  EXPECT_EQ(48, hash("0"));
-  EXPECT_EQ(57, hash("9"));
-  EXPECT_EQ(1077910243, hash("supercalifragilisticexpialidocious"));
+  coalesce(queue, 1);
+  EXPECT_EQ(2, queue.front());
+  EXPECT_EQ(3, queue.size());
+
+  coalesce(queue, 3);
+  EXPECT_EQ(2, queue.front());
+  EXPECT_EQ(3, queue.size());
 }
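
These expectations pin down the contract of coalesce(): it discards matching
values only from the front of the queue and stops at the first non-matching
element, which is why the second call above is a no-op. A minimal
implementation consistent with that contract (the shipped version lives in
util/queue.hpp and may differ in detail):

    template <class Container, class Value>
    void coalesce(Container& queue, const Value& value) {
      // Pop consecutive entries equal to `value` off the front; leave the
      // remainder of the queue untouched.
      while (!queue.empty() && queue.front() == value) {
        queue.pop_front();
      }
    }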
diff --git a/cppcache/test/util/synchronized_mapTest.cpp b/cppcache/test/util/synchronized_mapTest.cpp
new file mode 100644
index 0000000..2a6f79c
--- /dev/null
+++ b/cppcache/test/util/synchronized_mapTest.cpp
@@ -0,0 +1,373 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <atomic>
+#include <mutex>
+#include <unordered_map>
+
+#include <gtest/gtest.h>
+
+#include "TestableRecursiveMutex.hpp"
+#include "util/synchronized_map.hpp"
+
+using apache::geode::client::synchronized_map;
+
+TEST(synchronized_mapTest, emplaceLocks) {
+  synchronized_map<std::unordered_map<std::string, std::string>,
+                   TestableRecursiveMutex>
+      map;
+
+  auto result = map.emplace("a", "A");
+  ASSERT_TRUE(result.second);
+  EXPECT_EQ("a", result.first->first);
+  EXPECT_EQ("A", result.first->second);
+  EXPECT_EQ(0, map.mutex().recursive_depth_);
+  EXPECT_EQ(1, map.mutex().lock_count_);
+  EXPECT_EQ(1, map.mutex().unlock_count_);
+
+  result = map.emplace("b", "B");
+  ASSERT_TRUE(result.second);
+  EXPECT_EQ("b", result.first->first);
+  EXPECT_EQ("B", result.first->second);
+  EXPECT_EQ(0, map.mutex().recursive_depth_);
+  EXPECT_EQ(2, map.mutex().lock_count_);
+  EXPECT_EQ(2, map.mutex().unlock_count_);
+}
+
+TEST(synchronized_mapTest, eraseKeyLocks) {
+  synchronized_map<std::unordered_map<std::string, std::string>,
+                   TestableRecursiveMutex>
+      map;
+
+  map.emplace("a", "A");
+  map.mutex().resetCounters();
+
+  auto result = map.erase("a");
+  ASSERT_EQ(1, result);
+  EXPECT_EQ(0, map.mutex().recursive_depth_);
+  EXPECT_EQ(1, map.mutex().lock_count_);
+  EXPECT_EQ(1, map.mutex().unlock_count_);
+}
+
+TEST(synchronized_mapTest, beginLocks) {
+  synchronized_map<std::unordered_map<std::string, std::string>,
+                   TestableRecursiveMutex>
+      map;
+
+  map.emplace("a", "A");
+  map.mutex().resetCounters();
+
+  const auto& begin = map.begin();
+
+  ASSERT_EQ("a", begin->first);
+
+  EXPECT_EQ(0, map.mutex().recursive_depth_);
+  EXPECT_EQ(0, map.mutex().lock_count_);
+  EXPECT_EQ(0, map.mutex().unlock_count_);
+}
+
+TEST(synchronized_mapTest, beginConstLocks) {
+  synchronized_map<std::unordered_map<std::string, std::string>,
+                   TestableRecursiveMutex>
+      map;
+
+  map.emplace("a", "A");
+  map.mutex().resetCounters();
+
+  const auto& constMap = map;
+  const auto& begin = constMap.begin();
+
+  ASSERT_EQ("a", begin->first);
+
+  EXPECT_EQ(0, map.mutex().recursive_depth_);
+  EXPECT_EQ(0, map.mutex().lock_count_);
+  EXPECT_EQ(0, map.mutex().unlock_count_);
+}
+
+TEST(synchronized_mapTest, cbeginLocks) {
+  synchronized_map<std::unordered_map<std::string, std::string>,
+                   TestableRecursiveMutex>
+      map;
+
+  map.emplace("a", "A");
+  map.mutex().resetCounters();
+
+  const auto& begin = map.cbegin();
+
+  ASSERT_EQ("a", begin->first);
+
+  EXPECT_EQ(0, map.mutex().recursive_depth_);
+  EXPECT_EQ(0, map.mutex().lock_count_);
+  EXPECT_EQ(0, map.mutex().unlock_count_);
+}
+
+TEST(synchronized_mapTest, endLocks) {
+  synchronized_map<std::unordered_map<std::string, std::string>,
+                   TestableRecursiveMutex>
+      map;
+
+  map.emplace("a", "A");
+  map.mutex().resetCounters();
+
+  const auto& begin = map.begin();
+  const auto& end = map.end();
+
+  ASSERT_NE(begin, end);
+
+  EXPECT_EQ(0, map.mutex().recursive_depth_);
+  EXPECT_EQ(0, map.mutex().lock_count_);
+  EXPECT_EQ(0, map.mutex().unlock_count_);
+}
+
+TEST(synchronized_mapTest, endConstLocks) {
+  synchronized_map<std::unordered_map<std::string, std::string>,
+                   TestableRecursiveMutex>
+      map;
+
+  map.emplace("a", "A");
+  map.mutex().resetCounters();
+
+  const auto& constMap = map;
+  const auto& begin = constMap.begin();
+  const auto& end = constMap.end();
+
+  ASSERT_NE(begin, end);
+
+  EXPECT_EQ(0, map.mutex().recursive_depth_);
+  EXPECT_EQ(0, map.mutex().lock_count_);
+  EXPECT_EQ(0, map.mutex().unlock_count_);
+}
+
+TEST(synchronized_mapTest, cendLocks) {
+  synchronized_map<std::unordered_map<std::string, std::string>,
+                   TestableRecursiveMutex>
+      map;
+
+  map.emplace("a", "A");
+  map.mutex().resetCounters();
+
+  const auto& begin = map.cbegin();
+  const auto& end = map.cend();
+
+  ASSERT_NE(begin, end);
+
+  EXPECT_EQ(0, map.mutex().recursive_depth_);
+  EXPECT_EQ(0, map.mutex().lock_count_);
+  EXPECT_EQ(0, map.mutex().unlock_count_);
+}
+
+TEST(synchronized_mapTest, emptyLocks) {
+  synchronized_map<std::unordered_map<std::string, std::string>,
+                   TestableRecursiveMutex>
+      map;
+
+  ASSERT_TRUE(map.empty());
+  EXPECT_EQ(0, map.mutex().recursive_depth_);
+  EXPECT_EQ(1, map.mutex().lock_count_);
+  EXPECT_EQ(1, map.mutex().unlock_count_);
+
+  map.emplace("a", "A");
+  map.mutex().resetCounters();
+
+  ASSERT_FALSE(map.empty());
+  EXPECT_EQ(0, map.mutex().recursive_depth_);
+  EXPECT_EQ(1, map.mutex().lock_count_);
+  EXPECT_EQ(1, map.mutex().unlock_count_);
+}
+
+TEST(synchronized_mapTest, sizeLocks) {
+  synchronized_map<std::unordered_map<std::string, std::string>,
+                   TestableRecursiveMutex>
+      map;
+
+  ASSERT_EQ(0, map.size());
+  EXPECT_EQ(0, map.mutex().recursive_depth_);
+  EXPECT_EQ(1, map.mutex().lock_count_);
+  EXPECT_EQ(1, map.mutex().unlock_count_);
+
+  map.emplace("a", "A");
+  map.mutex().resetCounters();
+
+  ASSERT_EQ(1, map.size());
+  EXPECT_EQ(0, map.mutex().recursive_depth_);
+  EXPECT_EQ(1, map.mutex().lock_count_);
+  EXPECT_EQ(1, map.mutex().unlock_count_);
+}
+
+TEST(synchronized_mapTest, clearLocks) {
+  synchronized_map<std::unordered_map<std::string, std::string>,
+                   TestableRecursiveMutex>
+      map;
+
+  map.emplace("a", "A");
+  map.mutex().resetCounters();
+
+  map.clear();
+  EXPECT_EQ(0, map.mutex().recursive_depth_);
+  EXPECT_EQ(1, map.mutex().lock_count_);
+  EXPECT_EQ(1, map.mutex().unlock_count_);
+  EXPECT_TRUE(map.empty());
+}
+
+TEST(synchronized_mapTest, findNotLocked) {
+  synchronized_map<std::unordered_map<std::string, std::string>,
+                   TestableRecursiveMutex>
+      map;
+
+  map.emplace("a", "A");
+  map.mutex().resetCounters();
+
+  {
+    std::lock_guard<decltype(map)::mutex_type> lock(map.mutex());
+    const auto& entry = map.find("a");
+    EXPECT_EQ(1, map.mutex().recursive_depth_);
+    EXPECT_EQ(1, map.mutex().lock_count_);
+    EXPECT_EQ(0, map.mutex().unlock_count_);
+    EXPECT_EQ("a", entry->first);
+  }
+  EXPECT_EQ(0, map.mutex().recursive_depth_);
+  EXPECT_EQ(1, map.mutex().lock_count_);
+  EXPECT_EQ(1, map.mutex().unlock_count_);
+}
+
+TEST(synchronized_mapTest, findConstNotLocked) {
+  synchronized_map<std::unordered_map<std::string, std::string>,
+                   TestableRecursiveMutex>
+      map;
+
+  map.emplace("a", "A");
+  map.mutex().resetCounters();
+
+  {
+    auto&& lock = map.make_lock();
+    const auto& constMap = map;
+    const auto& entry = constMap.find("a");
+    EXPECT_EQ(1, map.mutex().recursive_depth_);
+    EXPECT_EQ(1, map.mutex().lock_count_);
+    EXPECT_EQ(0, map.mutex().unlock_count_);
+    EXPECT_EQ("a", entry->first);
+  }
+  EXPECT_EQ(0, map.mutex().recursive_depth_);
+  EXPECT_EQ(1, map.mutex().lock_count_);
+  EXPECT_EQ(1, map.mutex().unlock_count_);
+}
+
+TEST(synchronized_mapTest, iteratorNotLocked) {
+  synchronized_map<std::unordered_map<std::string, std::string>,
+                   TestableRecursiveMutex>
+      map;
+
+  map.emplace("a", "A");
+  map.emplace("b", "B");
+  map.mutex().resetCounters();
+
+  auto& mutex = map.mutex();
+  EXPECT_EQ(0, mutex.recursive_depth_);
+  EXPECT_EQ(0, map.mutex().lock_count_);
+  EXPECT_EQ(0, map.mutex().unlock_count_);
+
+  {
+    for (const auto& entry : map) {
+      EXPECT_EQ(0, mutex.recursive_depth_);
+      EXPECT_EQ(0, map.mutex().lock_count_);
+      EXPECT_EQ(0, map.mutex().unlock_count_);
+    }
+  }
+  EXPECT_EQ(0, mutex.recursive_depth_);
+  EXPECT_EQ(0, map.mutex().lock_count_);
+  EXPECT_EQ(0, map.mutex().unlock_count_);
+
+  {
+    std::lock_guard<decltype(map)::mutex_type> lock(mutex);
+    for (const auto& entry : map) {
+      EXPECT_EQ(1, mutex.recursive_depth_);
+      EXPECT_EQ(1, map.mutex().lock_count_);
+      EXPECT_EQ(0, map.mutex().unlock_count_);
+    }
+  }
+  EXPECT_EQ(0, mutex.recursive_depth_);
+  EXPECT_EQ(1, map.mutex().lock_count_);
+  EXPECT_EQ(1, map.mutex().unlock_count_);
+}
+
+TEST(synchronized_mapTest, make_lockDefault) {
+  synchronized_map<std::unordered_map<std::string, std::string>,
+                   TestableRecursiveMutex>
+      map;
+
+  {
+    auto&& lock = map.make_lock();
+    EXPECT_EQ(1, map.mutex().recursive_depth_);
+    EXPECT_EQ(1, map.mutex().lock_count_);
+    EXPECT_EQ(0, map.mutex().unlock_count_);
+  }
+  EXPECT_EQ(0, map.mutex().recursive_depth_);
+  EXPECT_EQ(1, map.mutex().lock_count_);
+  EXPECT_EQ(1, map.mutex().unlock_count_);
+}
+
+TEST(synchronized_mapTest, make_lock_WithUniqueLock) {
+  synchronized_map<std::unordered_map<std::string, std::string>,
+                   TestableRecursiveMutex>
+      map;
+
+  {
+    auto&& lock = map.make_lock<std::unique_lock>();
+    EXPECT_EQ(1, map.mutex().recursive_depth_);
+    EXPECT_EQ(1, map.mutex().lock_count_);
+    EXPECT_EQ(0, map.mutex().unlock_count_);
+  }
+  EXPECT_EQ(0, map.mutex().recursive_depth_);
+  EXPECT_EQ(1, map.mutex().lock_count_);
+  EXPECT_EQ(1, map.mutex().unlock_count_);
+}
+
+TEST(synchronized_mapTest, make_lock_WithUniqueLockDeferred) {
+  synchronized_map<std::unordered_map<std::string, std::string>,
+                   TestableRecursiveMutex>
+      map;
+
+  {
+    auto&& lock = map.make_lock<std::unique_lock>(std::defer_lock);
+    EXPECT_EQ(0, map.mutex().recursive_depth_);
+    EXPECT_EQ(0, map.mutex().lock_count_);
+    EXPECT_EQ(0, map.mutex().unlock_count_);
+    lock.lock();
+    EXPECT_EQ(1, map.mutex().recursive_depth_);
+    EXPECT_EQ(1, map.mutex().lock_count_);
+    EXPECT_EQ(0, map.mutex().unlock_count_);
+  }
+  EXPECT_EQ(0, map.mutex().recursive_depth_);
+  EXPECT_EQ(1, map.mutex().lock_count_);
+  EXPECT_EQ(1, map.mutex().unlock_count_);
+}
+
+TEST(synchronized_mapTest, insertIteratorIteratorLocks) {
+  std::unordered_map<std::string, std::string> source = {{"a", "A"},
+                                                         {"b", "B"}};
+
+  synchronized_map<std::unordered_map<std::string, std::string>,
+                   TestableRecursiveMutex>
+      map;
+
+  map.insert(source.begin(), source.end());
+  EXPECT_EQ(0, map.mutex().recursive_depth_);
+  EXPECT_EQ(1, map.mutex().lock_count_);
+  EXPECT_EQ(1, map.mutex().unlock_count_);
+  EXPECT_EQ(2, map.size());
+}
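
The findNotLocked tests above document the calling convention for lookups:
find() takes no lock of its own, so the caller holds the mutex across both
the lookup and any dereference of the returned iterator. In application code
that pattern looks like the following sketch, where process() stands in for a
hypothetical consumer:

    auto&& guard = map.make_lock();
    auto entry = map.find("a");
    if (entry != map.end()) {
      // `entry` is only valid while `guard` is held.
      process(entry->second);
    }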
diff --git a/cppcache/test/util/synchronized_setTest.cpp b/cppcache/test/util/synchronized_setTest.cpp
new file mode 100644
index 0000000..98f8563
--- /dev/null
+++ b/cppcache/test/util/synchronized_setTest.cpp
@@ -0,0 +1,373 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <atomic>
+#include <mutex>
+#include <set>
+#include <unordered_set>
+
+#include <gtest/gtest.h>
+
+#include "TestableRecursiveMutex.hpp"
+#include "util/synchronized_set.hpp"
+
+using apache::geode::client::synchronized_set;
+
+TEST(synchronized_setTest, emplaceLocks) {
+  synchronized_set<std::unordered_set<std::string>, TestableRecursiveMutex> set;
+
+  auto result = set.emplace("a");
+  ASSERT_TRUE(result.second);
+  EXPECT_EQ("a", *result.first);
+  EXPECT_EQ(0, set.mutex().recursive_depth_);
+  EXPECT_EQ(1, set.mutex().lock_count_);
+  EXPECT_EQ(1, set.mutex().unlock_count_);
+
+  result = set.emplace("b");
+  ASSERT_TRUE(result.second);
+  EXPECT_EQ("b", *result.first);
+  EXPECT_EQ(0, set.mutex().recursive_depth_);
+  EXPECT_EQ(2, set.mutex().lock_count_);
+  EXPECT_EQ(2, set.mutex().unlock_count_);
+}
+
+TEST(synchronized_setTest, eraseKeyLocks) {
+  synchronized_set<std::unordered_set<std::string>, TestableRecursiveMutex> set;
+
+  set.emplace("a");
+  set.mutex().resetCounters();
+
+  auto result = set.erase("a");
+  ASSERT_EQ(1, result);
+  EXPECT_EQ(0, set.mutex().recursive_depth_);
+  EXPECT_EQ(1, set.mutex().lock_count_);
+  EXPECT_EQ(1, set.mutex().unlock_count_);
+}
+
+TEST(synchronized_setTest, beginLocks) {
+  synchronized_set<std::unordered_set<std::string>, TestableRecursiveMutex> set;
+
+  set.emplace("a");
+  set.mutex().resetCounters();
+
+  const auto& begin = set.begin();
+
+  ASSERT_EQ("a", *begin);
+
+  EXPECT_EQ(0, set.mutex().recursive_depth_);
+  EXPECT_EQ(0, set.mutex().lock_count_);
+  EXPECT_EQ(0, set.mutex().unlock_count_);
+}
+
+TEST(synchronized_setTest, beginConstLocks) {
+  synchronized_set<std::unordered_set<std::string>, TestableRecursiveMutex> set;
+
+  set.emplace("a");
+  set.mutex().resetCounters();
+
+  const auto& constSet = set;
+  const auto& begin = constSet.begin();
+
+  ASSERT_EQ("a", *begin);
+
+  EXPECT_EQ(0, set.mutex().recursive_depth_);
+  EXPECT_EQ(0, set.mutex().lock_count_);
+  EXPECT_EQ(0, set.mutex().unlock_count_);
+}
+
+TEST(synchronized_setTest, cbeginLocks) {
+  synchronized_set<std::unordered_set<std::string>, TestableRecursiveMutex> set;
+
+  set.emplace("a");
+  set.mutex().resetCounters();
+
+  const auto& begin = set.cbegin();
+
+  ASSERT_EQ("a", *begin);
+
+  EXPECT_EQ(0, set.mutex().recursive_depth_);
+  EXPECT_EQ(0, set.mutex().lock_count_);
+  EXPECT_EQ(0, set.mutex().unlock_count_);
+}
+
+TEST(synchronized_setTest, endLocks) {
+  synchronized_set<std::unordered_set<std::string>, TestableRecursiveMutex> set;
+
+  set.emplace("a");
+  set.mutex().resetCounters();
+
+  const auto& begin = set.begin();
+  const auto& end = set.end();
+
+  ASSERT_NE(begin, end);
+
+  EXPECT_EQ(0, set.mutex().recursive_depth_);
+  EXPECT_EQ(0, set.mutex().lock_count_);
+  EXPECT_EQ(0, set.mutex().unlock_count_);
+}
+
+TEST(synchronized_setTest, endConstLocks) {
+  synchronized_set<std::unordered_set<std::string>, TestableRecursiveMutex> set;
+
+  set.emplace("a");
+  set.mutex().resetCounters();
+
+  const auto& constSet = set;
+  const auto& begin = constSet.begin();
+  const auto& end = constSet.end();
+
+  ASSERT_NE(begin, end);
+
+  EXPECT_EQ(0, set.mutex().recursive_depth_);
+  EXPECT_EQ(0, set.mutex().lock_count_);
+  EXPECT_EQ(0, set.mutex().unlock_count_);
+}
+
+TEST(synchronized_setTest, cendLocks) {
+  synchronized_set<std::unordered_set<std::string>, TestableRecursiveMutex> set;
+
+  set.emplace("a");
+  set.mutex().resetCounters();
+
+  const auto& begin = set.cbegin();
+  const auto& end = set.cend();
+
+  ASSERT_NE(begin, end);
+
+  EXPECT_EQ(0, set.mutex().recursive_depth_);
+  EXPECT_EQ(0, set.mutex().lock_count_);
+  EXPECT_EQ(0, set.mutex().unlock_count_);
+}
+
+TEST(synchronized_setTest, emptyLocks) {
+  synchronized_set<std::unordered_set<std::string>, TestableRecursiveMutex> set;
+
+  ASSERT_TRUE(set.empty());
+  EXPECT_EQ(0, set.mutex().recursive_depth_);
+  EXPECT_EQ(1, set.mutex().lock_count_);
+  EXPECT_EQ(1, set.mutex().unlock_count_);
+
+  set.emplace("a");
+  set.mutex().resetCounters();
+
+  ASSERT_FALSE(set.empty());
+  EXPECT_EQ(0, set.mutex().recursive_depth_);
+  EXPECT_EQ(1, set.mutex().lock_count_);
+  EXPECT_EQ(1, set.mutex().unlock_count_);
+}
+
+TEST(synchronized_setTest, sizeLocks) {
+  synchronized_set<std::unordered_set<std::string>, TestableRecursiveMutex> set;
+
+  ASSERT_EQ(0, set.size());
+  EXPECT_EQ(0, set.mutex().recursive_depth_);
+  EXPECT_EQ(1, set.mutex().lock_count_);
+  EXPECT_EQ(1, set.mutex().unlock_count_);
+
+  set.emplace("a");
+  set.mutex().resetCounters();
+
+  ASSERT_EQ(1, set.size());
+  EXPECT_EQ(0, set.mutex().recursive_depth_);
+  EXPECT_EQ(1, set.mutex().lock_count_);
+  EXPECT_EQ(1, set.mutex().unlock_count_);
+}
+
+TEST(synchronized_setTest, clearLocks) {
+  synchronized_set<std::unordered_set<std::string>, TestableRecursiveMutex> set;
+
+  set.emplace("a");
+  set.mutex().resetCounters();
+
+  set.clear();
+  EXPECT_EQ(0, set.mutex().recursive_depth_);
+  EXPECT_EQ(1, set.mutex().lock_count_);
+  EXPECT_EQ(1, set.mutex().unlock_count_);
+  EXPECT_TRUE(set.empty());
+}
+
+TEST(synchronized_setTest, findNotLocked) {
+  synchronized_set<std::unordered_set<std::string>, TestableRecursiveMutex> set;
+
+  set.emplace("a");
+  set.mutex().resetCounters();
+
+  {
+    std::lock_guard<decltype(set)::mutex_type> lock(set.mutex());
+    const auto& entry = set.find("a");
+    EXPECT_EQ(1, set.mutex().recursive_depth_);
+    EXPECT_EQ(1, set.mutex().lock_count_);
+    EXPECT_EQ(0, set.mutex().unlock_count_);
+    EXPECT_EQ("a", *entry);
+  }
+  EXPECT_EQ(0, set.mutex().recursive_depth_);
+  EXPECT_EQ(1, set.mutex().lock_count_);
+  EXPECT_EQ(1, set.mutex().unlock_count_);
+}
+
+TEST(synchronized_setTest, findConstNotLocked) {
+  synchronized_set<std::unordered_set<std::string>, TestableRecursiveMutex> set;
+
+  set.emplace("a");
+  set.mutex().resetCounters();
+
+  {
+    auto&& lock = set.make_lock();
+    const auto& constSet = set;
+    const auto& entry = constSet.find("a");
+    EXPECT_EQ(1, set.mutex().recursive_depth_);
+    EXPECT_EQ(1, set.mutex().lock_count_);
+    EXPECT_EQ(0, set.mutex().unlock_count_);
+    EXPECT_EQ("a", *entry);
+  }
+  EXPECT_EQ(0, set.mutex().recursive_depth_);
+  EXPECT_EQ(1, set.mutex().lock_count_);
+  EXPECT_EQ(1, set.mutex().unlock_count_);
+}
+
+TEST(synchronized_setTest, iteratorNotLocked) {
+  synchronized_set<std::unordered_set<std::string>, TestableRecursiveMutex> set;
+
+  set.emplace("a");
+  set.emplace("b");
+  set.mutex().resetCounters();
+
+  auto& mutex = set.mutex();
+  EXPECT_EQ(0, mutex.recursive_depth_);
+  EXPECT_EQ(0, set.mutex().lock_count_);
+  EXPECT_EQ(0, set.mutex().unlock_count_);
+
+  {
+    for (const auto& entry : set) {
+      EXPECT_EQ(0, mutex.recursive_depth_);
+      EXPECT_EQ(0, set.mutex().lock_count_);
+      EXPECT_EQ(0, set.mutex().unlock_count_);
+    }
+  }
+  EXPECT_EQ(0, mutex.recursive_depth_);
+  EXPECT_EQ(0, set.mutex().lock_count_);
+  EXPECT_EQ(0, set.mutex().unlock_count_);
+
+  {
+    std::lock_guard<decltype(set)::mutex_type> lock(mutex);
+    for (const auto& entry : set) {
+      EXPECT_EQ(1, mutex.recursive_depth_);
+      EXPECT_EQ(1, set.mutex().lock_count_);
+      EXPECT_EQ(0, set.mutex().unlock_count_);
+    }
+  }
+  EXPECT_EQ(0, mutex.recursive_depth_);
+  EXPECT_EQ(1, set.mutex().lock_count_);
+  EXPECT_EQ(1, set.mutex().unlock_count_);
+}
+
+TEST(synchronized_setTest, make_lockDefault) {
+  synchronized_set<std::unordered_set<std::string>, TestableRecursiveMutex> set;
+
+  {
+    auto&& lock = set.make_lock();
+    EXPECT_EQ(1, set.mutex().recursive_depth_);
+    EXPECT_EQ(1, set.mutex().lock_count_);
+    EXPECT_EQ(0, set.mutex().unlock_count_);
+  }
+  EXPECT_EQ(0, set.mutex().recursive_depth_);
+  EXPECT_EQ(1, set.mutex().lock_count_);
+  EXPECT_EQ(1, set.mutex().unlock_count_);
+}
+
+TEST(synchronized_setTest, make_lock_WithUniqueLock) {
+  synchronized_set<std::unordered_set<std::string>, TestableRecursiveMutex> set;
+
+  {
+    auto&& lock = set.make_lock<std::unique_lock>();
+    EXPECT_EQ(1, set.mutex().recursive_depth_);
+    EXPECT_EQ(1, set.mutex().lock_count_);
+    EXPECT_EQ(0, set.mutex().unlock_count_);
+  }
+  EXPECT_EQ(0, set.mutex().recursive_depth_);
+  EXPECT_EQ(1, set.mutex().lock_count_);
+  EXPECT_EQ(1, set.mutex().unlock_count_);
+}
+
+TEST(synchronized_setTest, make_lock_WithUniqueLockDeferred) {
+  synchronized_set<std::unordered_set<std::string>, TestableRecursiveMutex> set;
+
+  {
+    auto&& lock = set.make_lock<std::unique_lock>(std::defer_lock);
+    EXPECT_EQ(0, set.mutex().recursive_depth_);
+    EXPECT_EQ(0, set.mutex().lock_count_);
+    EXPECT_EQ(0, set.mutex().unlock_count_);
+    lock.lock();
+    EXPECT_EQ(1, set.mutex().recursive_depth_);
+    EXPECT_EQ(1, set.mutex().lock_count_);
+    EXPECT_EQ(0, set.mutex().unlock_count_);
+  }
+  EXPECT_EQ(0, set.mutex().recursive_depth_);
+  EXPECT_EQ(1, set.mutex().lock_count_);
+  EXPECT_EQ(1, set.mutex().unlock_count_);
+}
+
+TEST(synchronized_setTest, insertIteratorIteratorLocks) {
+  std::unordered_set<std::string> source = {"a", "b"};
+
+  synchronized_set<std::unordered_set<std::string>, TestableRecursiveMutex> set;
+
+  set.insert(source.begin(), source.end());
+  EXPECT_EQ(0, set.mutex().recursive_depth_);
+  EXPECT_EQ(1, set.mutex().lock_count_);
+  EXPECT_EQ(1, set.mutex().unlock_count_);
+  EXPECT_EQ(2, set.size());
+}
+
+TEST(synchronized_setTest, insertRvalue) {
+  synchronized_set<std::unordered_set<std::string>, TestableRecursiveMutex> set;
+
+  set.insert("a");
+  EXPECT_EQ(0, set.mutex().recursive_depth_);
+  EXPECT_EQ(1, set.mutex().lock_count_);
+  EXPECT_EQ(1, set.mutex().unlock_count_);
+  EXPECT_EQ(1, set.size());
+}
+
+TEST(synchronized_setTest, insertLvalue) {
+  synchronized_set<std::unordered_set<std::string>, TestableRecursiveMutex> set;
+
+  std::string value = "a";
+  set.insert(value);
+  EXPECT_EQ(0, set.mutex().recursive_depth_);
+  EXPECT_EQ(1, set.mutex().lock_count_);
+  EXPECT_EQ(1, set.mutex().unlock_count_);
+  EXPECT_EQ(1, set.size());
+}
+
+TEST(synchronized_setTest, compilesWithStdSet) {
+  synchronized_set<std::set<std::string>, TestableRecursiveMutex> set;
+
+  auto result = set.emplace("a");
+  ASSERT_TRUE(result.second);
+  EXPECT_EQ("a", *result.first);
+  EXPECT_EQ(0, set.mutex().recursive_depth_);
+  EXPECT_EQ(1, set.mutex().lock_count_);
+  EXPECT_EQ(1, set.mutex().unlock_count_);
+
+  result = set.emplace("b");
+  ASSERT_TRUE(result.second);
+  EXPECT_EQ("b", *result.first);
+  EXPECT_EQ(0, set.mutex().recursive_depth_);
+  EXPECT_EQ(2, set.mutex().lock_count_);
+  EXPECT_EQ(2, set.mutex().unlock_count_);
+}

