geode-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From kl...@apache.org
Subject [23/63] [abbrv] incubator-geode git commit: GEODE-1059: PRQueryDUnitHelper no longer inherits PartitionedRegionDUnitTestCase class
Date Wed, 04 May 2016 22:57:23 GMT
GEODE-1059: PRQueryDUnitHelper no longer inherits PartitionedRegionDUnitTestCase class

* The PRQueryDUnitHelper class no longer inherits PartitionedRegionDUnitTestCase; hence there are no more calls to its constructor.
* All the methods for creating PortfolioData elements were moved to the Utils class.
* Removed the file PRQueryPerfDUnitTest.java

This closes #132


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/6fb84d96
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/6fb84d96
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/6fb84d96

Branch: refs/heads/feature/GEODE-1276
Commit: 6fb84d96294e50f3f4d1914a66d82dd1da4dceff
Parents: 6b4cdb1
Author: nabarun <nnag@pivotal.io>
Authored: Wed Apr 13 15:38:44 2016 -0700
Committer: Dan Smith <upthewaterspout@apache.org>
Committed: Wed Apr 27 15:59:51 2016 -0700

----------------------------------------------------------------------
 .../com/gemstone/gemfire/cache/query/Utils.java |  38 +
 .../dunit/QueryDataInconsistencyDUnitTest.java  |   2 -
 .../QueryUsingFunctionContextDUnitTest.java     |   8 +-
 .../QueryREUpdateInProgressJUnitTest.java       |  12 +-
 ...rrentIndexInitOnOverflowRegionDUnitTest.java |  27 +-
 ...ndexOperationsOnOverflowRegionDUnitTest.java |  97 +--
 ...pdateWithInplaceObjectModFalseDUnitTest.java |  46 +-
 ...ConcurrentIndexUpdateWithoutWLDUnitTest.java |  48 +-
 ...itializeIndexEntryDestroyQueryDUnitTest.java |  96 +--
 .../PRBasicIndexCreationDUnitTest.java          | 302 +++----
 .../PRBasicIndexCreationDeadlockDUnitTest.java  |  42 +-
 .../PRBasicMultiIndexCreationDUnitTest.java     | 267 ++----
 .../partitioned/PRBasicQueryDUnitTest.java      |  36 +-
 .../PRBasicRemoveIndexDUnitTest.java            |  37 +-
 .../PRColocatedEquiJoinDUnitTest.java           | 106 +--
 .../partitioned/PRInvalidQueryDUnitTest.java    |  26 +-
 .../partitioned/PRQueryCacheCloseDUnitTest.java |  81 +-
 .../query/partitioned/PRQueryDUnitHelper.java   | 818 +++----------------
 .../query/partitioned/PRQueryDUnitTest.java     |  87 +-
 .../query/partitioned/PRQueryPerfDUnitTest.java | 504 ------------
 .../PRQueryRegionCloseDUnitTest.java            |  28 +-
 .../PRQueryRegionDestroyedDUnitTest.java        |  36 +-
 .../PRQueryRemoteNodeExceptionDUnitTest.java    |  48 +-
 .../gemfire/management/QueryDataDUnitTest.java  |   6 +-
 24 files changed, 805 insertions(+), 1993 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/6fb84d96/geode-core/src/test/java/com/gemstone/gemfire/cache/query/Utils.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/Utils.java b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/Utils.java
index ddd3a16..a34d049 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/Utils.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/Utils.java
@@ -25,6 +25,12 @@ package com.gemstone.gemfire.cache.query;
 import java.util.Collection;
 import java.util.Iterator;
 
+import com.gemstone.gemfire.cache.query.data.Portfolio;
+import com.gemstone.gemfire.cache.query.data.PortfolioData;
+import com.gemstone.gemfire.cache.query.data.Position;
+
+import parReg.query.unittest.NewPortfolio;
+
 /**
  *
  */
@@ -53,4 +59,36 @@ public class Utils {
       sb.append(r);
     return sb.toString();
   }
+  /**
+   * This function <br>
+   * 1. Creates an array of PortfolioData objects
+   *
+   * @return PortFolioData Objects
+   */
+
+  public static PortfolioData[] createPortfolioData(final int cnt, final int cntDest) {
+    PortfolioData[] portfolio = new PortfolioData[cntDest];
+    for (int k = cnt; k < cntDest; k++) {
+      portfolio[k] = new PortfolioData(k);
+    }
+    return portfolio;
+  }
+
+  public static Portfolio[] createPortfoliosAndPositions(int count) {
+    Position.cnt = 0; // reset Portfolio counter
+    Portfolio[] portfolios = new Portfolio[count];
+    for (int i = 0; i < count; i++) {
+      portfolios[i] = new Portfolio(i);
+    }
+    return portfolios;
+  }
+
+  public static NewPortfolio[] createNewPortfoliosAndPositions(int count) {
+    Position.cnt = 0; // reset Portfolio counter
+    NewPortfolio[] portfolios = new NewPortfolio[count];
+    for (int i = 0; i < count; i++) {
+      portfolios[i] = new NewPortfolio("" + i, i);
+    }
+    return portfolios;
+  }
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/6fb84d96/geode-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/QueryDataInconsistencyDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/QueryDataInconsistencyDUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/QueryDataInconsistencyDUnitTest.java
index c5f5140..475ad49 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/QueryDataInconsistencyDUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/QueryDataInconsistencyDUnitTest.java
@@ -84,8 +84,6 @@ public class QueryDataInconsistencyDUnitTest extends CacheTestCase {
   public static String[] queriesForRR = new String[] { "<trace> select * from /"
       + repRegionName + " where ID=1" };
 
-  private static PRQueryDUnitHelper PRQHelp = new PRQueryDUnitHelper("");
-
   public static volatile boolean hooked = false;
   /**
    * @param name

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/6fb84d96/geode-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/QueryUsingFunctionContextDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/QueryUsingFunctionContextDUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/QueryUsingFunctionContextDUnitTest.java
index 08626de..1d60010 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/QueryUsingFunctionContextDUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/QueryUsingFunctionContextDUnitTest.java
@@ -16,6 +16,8 @@
  */
 package com.gemstone.gemfire.cache.query.dunit;
 
+import static com.gemstone.gemfire.cache.query.Utils.createPortfoliosAndPositions;
+
 import java.util.ArrayList;
 import java.util.HashSet;
 import java.util.List;
@@ -125,13 +127,11 @@ public class QueryUsingFunctionContextDUnitTest extends CacheTestCase {
   
   public static String[] queriesForRR = new String[]{"<trace> select * from /"+repRegionName+" where ID>=0"};
 
-  private static PRQueryDUnitHelper PRQHelp = new PRQueryDUnitHelper("");
   /**
    * @param name
    */
   public QueryUsingFunctionContextDUnitTest(String name) {
     super(name);
-
   }
 
   @Override
@@ -671,7 +671,7 @@ public class QueryUsingFunctionContextDUnitTest extends CacheTestCase {
 
   public void fillValuesInRegions() {
     //Create common Portflios and NewPortfolios
-    final Portfolio[] portfolio = PRQHelp.createPortfoliosAndPositions(cntDest);
+    final Portfolio[] portfolio = createPortfoliosAndPositions(cntDest);
 
     //Fill local region
     server1.invoke(getCacheSerializableRunnableForPRPuts(localRegionName,
@@ -1015,7 +1015,7 @@ public class QueryUsingFunctionContextDUnitTest extends CacheTestCase {
           region.put(new Integer(j), portfolio[j]);
         LogWriterUtils.getLogWriter()
             .info(
-                "PRQueryDUnitHelper#getCacheSerializableRunnableForPRPuts: Inserted Portfolio data on Region "
+                "getCacheSerializableRunnableForPRPuts: Inserted Portfolio data on Region "
                     + regionName);
       }
     };

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/6fb84d96/geode-core/src/test/java/com/gemstone/gemfire/cache/query/functional/QueryREUpdateInProgressJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/functional/QueryREUpdateInProgressJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/functional/QueryREUpdateInProgressJUnitTest.java
index e7681b5..9a48929 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/functional/QueryREUpdateInProgressJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/functional/QueryREUpdateInProgressJUnitTest.java
@@ -19,6 +19,7 @@
  */
 package com.gemstone.gemfire.cache.query.functional;
 
+import static com.gemstone.gemfire.cache.query.Utils.createPortfoliosAndPositions;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
@@ -39,7 +40,6 @@ import com.gemstone.gemfire.cache.query.CacheUtils;
 import com.gemstone.gemfire.cache.query.QueryService;
 import com.gemstone.gemfire.cache.query.SelectResults;
 import com.gemstone.gemfire.cache.query.data.Portfolio;
-import com.gemstone.gemfire.cache.query.partitioned.PRQueryDUnitHelper;
 import com.gemstone.gemfire.cache.query.types.ObjectType;
 import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
 import com.gemstone.gemfire.internal.cache.LocalRegion.NonTXEntry;
@@ -368,21 +368,21 @@ public class QueryREUpdateInProgressJUnitTest {
       if ((r[j][0] != null) && (r[j][1] != null)) {
         type1 = ((SelectResults) r[j][0]).getCollectionType().getElementType();
         assertNotNull(
-            "PRQueryDUnitHelper#compareTwoQueryResults: Type 1 is NULL "
+            "#compareTwoQueryResults: Type 1 is NULL "
                 + type1, type1);
         type2 = ((SelectResults) r[j][1]).getCollectionType().getElementType();
         assertNotNull(
-            "PRQueryDUnitHelper#compareTwoQueryResults: Type 2 is NULL "
+            "#compareTwoQueryResults: Type 2 is NULL "
                 + type2, type2);
         if ( !(type1.getClass().getName()).equals(type2.getClass().getName()) ) {
-          fail("PRQueryDUnitHelper#compareTwoQueryResults: FAILED:Search result Type is different in both the cases: " 
+          fail("#compareTwoQueryResults: FAILED:Search result Type is different in both the cases: "
               + type1.getClass().getName() + " "
               + type2.getClass().getName());
         }
         int size0 = ((SelectResults) r[j][0]).size();
         int size1 = ((SelectResults) r[j][1]).size();
         if (size0 != size1) {
-          fail("PRQueryDUnitHelper#compareTwoQueryResults: FAILED:Search resultSet size are different in both cases; size0="
+          fail("#compareTwoQueryResults: FAILED:Search resultSet size are different in both cases; size0="
               + size0 + ";size1=" + size1 + ";j=" + j);
         }
       }
@@ -401,7 +401,7 @@ public class QueryREUpdateInProgressJUnitTest {
 
   private void putREWithUpdateInProgressTrue(String region) {
     Region reg = CacheUtils.getRegion(region);
-    Portfolio[] values = new PRQueryDUnitHelper("").createPortfoliosAndPositions(numOfEntries);
+    Portfolio[] values = createPortfoliosAndPositions(numOfEntries);
 
     int i=0;
     for (Object val: values) {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/6fb84d96/geode-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/ConcurrentIndexInitOnOverflowRegionDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/ConcurrentIndexInitOnOverflowRegionDUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/ConcurrentIndexInitOnOverflowRegionDUnitTest.java
index 19b1dfb..4dc2890 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/ConcurrentIndexInitOnOverflowRegionDUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/ConcurrentIndexInitOnOverflowRegionDUnitTest.java
@@ -39,7 +39,6 @@ import com.gemstone.gemfire.cache.query.QueryService;
 import com.gemstone.gemfire.cache.query.data.Portfolio;
 import com.gemstone.gemfire.cache.query.data.PortfolioData;
 import com.gemstone.gemfire.cache.query.internal.index.IndexManager.TestHook;
-import com.gemstone.gemfire.cache.query.partitioned.PRQueryDUnitHelper;
 import com.gemstone.gemfire.cache.server.CacheServer;
 import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
 import com.gemstone.gemfire.cache30.CacheTestCase;
@@ -58,8 +57,6 @@ import com.gemstone.gemfire.test.dunit.Wait;
  */
 public class ConcurrentIndexInitOnOverflowRegionDUnitTest extends CacheTestCase {
 
-  PRQueryDUnitHelper PRQHelp = new PRQueryDUnitHelper("");
-
   String name;
 
   final int redundancy = 0;
@@ -92,7 +89,7 @@ public class ConcurrentIndexInitOnOverflowRegionDUnitTest extends CacheTestCase
         "Create local region with synchronous index maintenance") {
       @Override
       public void run2() throws CacheException {
-        Cache cache = PRQHelp.getCache();
+        Cache cache = getCache();
         Region partitionRegion = null;
         IndexManager.testHook = null;
         try {
@@ -140,10 +137,10 @@ public class ConcurrentIndexInitOnOverflowRegionDUnitTest extends CacheTestCase
 
       @Override
       public void run2() throws CacheException {
-        Cache cache = PRQHelp.getCache();
+        Cache cache = getCache();
 
         // Do a put in region.
-        Region r = PRQHelp.getCache().getRegion(name);
+        Region r = getCache().getRegion(name);
 
         for (int i = 0; i < 100; i++) {
           r.put(i, new PortfolioData(i));
@@ -153,7 +150,7 @@ public class ConcurrentIndexInitOnOverflowRegionDUnitTest extends CacheTestCase
         IndexManager.testHook = new IndexManagerTestHook();
 
         // Destroy one of the values.
-        PRQHelp.getCache().getLogger().fine("Destroying the value");
+        getCache().getLogger().fine("Destroying the value");
         r.destroy(1);
 
         IndexManager.testHook = null;
@@ -165,7 +162,7 @@ public class ConcurrentIndexInitOnOverflowRegionDUnitTest extends CacheTestCase
 
       @Override
       public void run2() throws CacheException {
-        Cache cache = PRQHelp.getCache();
+        Cache cache = getCache();
 
         while (!hooked) {
           Wait.pause(100);
@@ -204,7 +201,7 @@ public class ConcurrentIndexInitOnOverflowRegionDUnitTest extends CacheTestCase
         "Create local region with synchronous index maintenance") {
       @Override
       public void run2() throws CacheException {
-        Cache cache = PRQHelp.getCache();
+        Cache cache = getCache();
         
         Region partitionRegion = null;
         IndexManager.testHook = null;
@@ -305,7 +302,7 @@ public class ConcurrentIndexInitOnOverflowRegionDUnitTest extends CacheTestCase
 
       @Override
       public void run2() throws CacheException {
-        Cache cache = PRQHelp.getCache();
+        Cache cache = getCache();
 
         while (!hooked) {
           Wait.pause(100);
@@ -355,7 +352,7 @@ public class ConcurrentIndexInitOnOverflowRegionDUnitTest extends CacheTestCase
       
       @Override
       public void run2() throws CacheException {
-        Cache cache = PRQHelp.getCache();
+        Cache cache = getCache();
         Region region = cache.createRegionFactory(RegionShortcut.LOCAL).create(regionName);
         QueryService qService = cache.getQueryService();
         
@@ -391,7 +388,7 @@ public class ConcurrentIndexInitOnOverflowRegionDUnitTest extends CacheTestCase
       @Override
       public void run2() throws CacheException {
         
-        Region region = PRQHelp.getCache().getRegion(regionName);
+        Region region = getCache().getRegion(regionName);
         for (int i=0; i<100; i++) {
           if (i == 50) IndexManager.testHook = new LocalTestHook();
           region.put(i, new Portfolio(i));
@@ -405,7 +402,7 @@ public class ConcurrentIndexInitOnOverflowRegionDUnitTest extends CacheTestCase
       
       @Override
       public void run2() throws CacheException {
-        Region region = PRQHelp.getCache().getRegion(regionName);
+        Region region = getCache().getRegion(regionName);
         
         while(!hooked) {
           Wait.pause(100);
@@ -417,7 +414,7 @@ public class ConcurrentIndexInitOnOverflowRegionDUnitTest extends CacheTestCase
         }
 
         try {
-            QueryService qservice = PRQHelp.getCache().getQueryService();
+            QueryService qservice = getCache().getQueryService();
             Index index = qservice.getIndex(region, "idIndex");
             if (((CompactRangeIndex)index).getIndexStorage().size() > 1) {
               fail("After clear region size is supposed to be zero as all index updates are blocked. Current region size is: "+ region.size());
@@ -436,7 +433,7 @@ public class ConcurrentIndexInitOnOverflowRegionDUnitTest extends CacheTestCase
       
       @Override
       public void run2() throws CacheException {
-        Region region = PRQHelp.getCache().getRegion(regionName);
+        Region region = getCache().getRegion(regionName);
         if (region.size() > 50) {
           fail("After clear region size is supposed to be 50 as all index updates are blocked " + region.size());
         }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/6fb84d96/geode-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/ConcurrentIndexOperationsOnOverflowRegionDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/ConcurrentIndexOperationsOnOverflowRegionDUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/ConcurrentIndexOperationsOnOverflowRegionDUnitTest.java
index 25e4166..466483d 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/ConcurrentIndexOperationsOnOverflowRegionDUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/ConcurrentIndexOperationsOnOverflowRegionDUnitTest.java
@@ -29,7 +29,6 @@ import com.gemstone.gemfire.cache.DataPolicy;
 import com.gemstone.gemfire.cache.DiskStore;
 import com.gemstone.gemfire.cache.EvictionAction;
 import com.gemstone.gemfire.cache.EvictionAlgorithm;
-import com.gemstone.gemfire.cache.EvictionAttributes;
 import com.gemstone.gemfire.cache.PartitionAttributesFactory;
 import com.gemstone.gemfire.cache.Region;
 import com.gemstone.gemfire.cache.RegionFactory;
@@ -37,13 +36,9 @@ import com.gemstone.gemfire.cache.query.Index;
 import com.gemstone.gemfire.cache.query.Query;
 import com.gemstone.gemfire.cache.query.SelectResults;
 import com.gemstone.gemfire.cache.query.data.PortfolioData;
-import com.gemstone.gemfire.cache.query.internal.QueryObserverAdapter;
-import com.gemstone.gemfire.cache.query.internal.QueryObserverHolder;
-import com.gemstone.gemfire.cache.query.partitioned.PRQueryDUnitHelper;
 import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
 import com.gemstone.gemfire.cache30.CacheTestCase;
 import com.gemstone.gemfire.internal.cache.EvictionAttributesImpl;
-import com.gemstone.gemfire.internal.cache.PartitionedRegionQueryEvaluator.TestHook;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.LogWriterUtils;
@@ -64,16 +59,8 @@ import com.gemstone.gemfire.test.dunit.Wait;
 public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
     CacheTestCase {
 
-  PRQueryDUnitHelper PRQHelp = new PRQueryDUnitHelper("");
-
   String name;
 
-  final int redundancy = 0;
-
-  private int cnt=0;
-
-  private int cntDest=1;
-
   public static volatile boolean hooked = false;
 
   /**
@@ -95,7 +82,7 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
     vm0.invoke(new CacheSerializableRunnable("Create local region with synchronous index maintenance") {
       @Override
       public void run2() throws CacheException {
-        Cache cache = PRQHelp.getCache();
+        Cache cache = getCache();
         Region partitionRegion = null;
         IndexManager.testHook = null;
         try {
@@ -138,10 +125,10 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
 
       @Override
       public void run2() throws CacheException {
-        Cache cache = PRQHelp.getCache();
+        Cache cache = getCache();
 
         // Do a put in region.
-        Region r = PRQHelp.getCache().getRegion(name);
+        Region r = getCache().getRegion(name);
 
         for (int i=0; i<100; i++) {
           r.put(i, new PortfolioData(i));
@@ -151,7 +138,7 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
         IndexManager.testHook = new IndexManagerTestHook();
 
         // Destroy one of the values.
-        PRQHelp.getCache().getLogger().fine("Destroying the value");
+        getCache().getLogger().fine("Destroying the value");
         r.destroy(1);
 
         IndexManager.testHook = null;
@@ -162,16 +149,16 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
 
       @Override
       public void run2() throws CacheException {
-        Cache cache = PRQHelp.getCache();
+        Cache cache = getCache();
 
-        Query statusQuery = PRQHelp.getCache().getQueryService()
+        Query statusQuery = getCache().getQueryService()
             .newQuery("select * from /" + name + " p where p.ID > -1");
 
         while (!hooked) {
           Wait.pause(100);
         }
         try {
-          PRQHelp.getCache().getLogger().fine("Querying the region");
+          getCache().getLogger().fine("Querying the region");
           SelectResults results = (SelectResults)statusQuery.execute();
           assertEquals(100, results.size());
         } catch (Exception e) {
@@ -197,7 +184,7 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
     vm0.invoke(new CacheSerializableRunnable("Create local region with synchronous index maintenance") {
       @Override
       public void run2() throws CacheException {
-        Cache cache = PRQHelp.getCache();
+        Cache cache = getCache();
         Region partitionRegion = null;
         IndexManager.testHook = null;
         try {
@@ -240,10 +227,10 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
 
       @Override
       public void run2() throws CacheException {
-        Cache cache = PRQHelp.getCache();
+        Cache cache = getCache();
 
         // Do a put in region.
-        Region r = PRQHelp.getCache().getRegion(name);
+        Region r = getCache().getRegion(name);
 
         for (int i=0; i<100; i++) {
           r.put(i, new PortfolioData(i));
@@ -253,7 +240,7 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
         IndexManager.testHook = new IndexManagerTestHook();
 
         // Destroy one of the values.
-        PRQHelp.getCache().getLogger().fine("Destroying the value");
+        getCache().getLogger().fine("Destroying the value");
         r.destroy(1);
 
         IndexManager.testHook = null;
@@ -264,16 +251,16 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
 
       @Override
       public void run2() throws CacheException {
-        Cache cache = PRQHelp.getCache();
+        Cache cache = getCache();
 
-        Query statusQuery = PRQHelp.getCache().getQueryService()
+        Query statusQuery = getCache().getQueryService()
             .newQuery("select * from /" + name + " p where p.ID > -1");
 
         while (!hooked) {
           Wait.pause(100);
         }
         try {
-          PRQHelp.getCache().getLogger().fine("Querying the region");
+          getCache().getLogger().fine("Querying the region");
           SelectResults results = (SelectResults)statusQuery.execute();
           assertEquals(100, results.size());
         } catch (Exception e) {
@@ -300,7 +287,7 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
         "Create local region with synchronous index maintenance") {
       @Override
       public void run2() throws CacheException {
-        Cache cache = PRQHelp.getCache();
+        Cache cache = getCache();
         Region partitionRegion = null;
         IndexManager.testHook = null;
         try {
@@ -348,10 +335,8 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
 
       @Override
       public void run2() throws CacheException {
-        Cache cache = PRQHelp.getCache();
-
         // Do a put in region.
-        Region r = PRQHelp.getCache().getRegion(name);
+        Region r = getCache().getRegion(name);
 
         for (int i = 0; i < 100; i++) {
           r.put(i, new PortfolioData(i));
@@ -361,7 +346,7 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
         IndexManager.testHook = new IndexManagerTestHook();
 
         // Destroy one of the values.
-        PRQHelp.getCache().getLogger().fine("Destroying the value");
+        getCache().getLogger().fine("Destroying the value");
         r.destroy(1);
 
         IndexManager.testHook = null;
@@ -373,16 +358,14 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
 
       @Override
       public void run2() throws CacheException {
-        Cache cache = PRQHelp.getCache();
-
-        Query statusQuery = PRQHelp.getCache().getQueryService()
+        Query statusQuery = getCache().getQueryService()
             .newQuery("select * from /" + name + " p where p.ID > -1");
 
         while (!hooked) {
           Wait.pause(100);
         }
         try {
-          PRQHelp.getCache().getLogger().fine("Querying the region");
+          getCache().getLogger().fine("Querying the region");
           SelectResults results = (SelectResults)statusQuery.execute();
           assertEquals(100, results.size());
         } catch (Exception e) {
@@ -409,7 +392,7 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
         "Create local region with synchronous index maintenance") {
       @Override
       public void run2() throws CacheException {
-        Cache cache = PRQHelp.getCache();
+        Cache cache = getCache();
         Region partitionRegion = null;
         IndexManager.testHook = null;
         try {
@@ -457,10 +440,8 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
 
       @Override
       public void run2() throws CacheException {
-        Cache cache = PRQHelp.getCache();
-
         // Do a put in region.
-        Region r = PRQHelp.getCache().getRegion(name);
+        Region r = getCache().getRegion(name);
 
         for (int i = 0; i < 100; i++) {
           r.put(i, new PortfolioData(i));
@@ -470,7 +451,7 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
         IndexManager.testHook = new IndexManagerTestHook();
 
         // Destroy one of the values.
-        PRQHelp.getCache().getLogger().fine("Destroying the value");
+        getCache().getLogger().fine("Destroying the value");
         r.destroy(1);
 
         IndexManager.testHook = null;
@@ -482,16 +463,14 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
 
       @Override
       public void run2() throws CacheException {
-        Cache cache = PRQHelp.getCache();
-
-        Query statusQuery = PRQHelp.getCache().getQueryService()
+        Query statusQuery = getCache().getQueryService()
             .newQuery("select * from /" + name + " p where p.ID > -1");
 
         while (!hooked) {
           Wait.pause(100);
         }
         try {
-          PRQHelp.getCache().getLogger().fine("Querying the region");
+          getCache().getLogger().fine("Querying the region");
           SelectResults results = (SelectResults)statusQuery.execute();
           assertEquals(100, results.size());
         } catch (Exception e) {
@@ -517,7 +496,7 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
    vm0.invoke(new CacheSerializableRunnable("Create local region with synchronous index maintenance") {
      @Override
      public void run2() throws CacheException {
-       Cache cache = PRQHelp.getCache();
+       Cache cache = getCache();
        Region partitionRegion = null;
        IndexManager.testHook = null;
        try {
@@ -551,10 +530,10 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
 
      @Override
      public void run2() throws CacheException {
-       Cache cache = PRQHelp.getCache();
+       Cache cache = getCache();
 
        // Do a put in region.
-       Region r = PRQHelp.getCache().getRegion(name);
+       Region r = getCache().getRegion(name);
 
        for (int i=0; i<100; i++) {
          r.put(i, new PortfolioData(i));
@@ -564,7 +543,7 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
        IndexManager.testHook = new IndexManagerNoWaitTestHook();
 
        // Destroy one of the values.
-       PRQHelp.getCache().getLogger().fine("Destroying the value");
+       getCache().getLogger().fine("Destroying the value");
        r.destroy(1);
 
        IndexManager.testHook = null;
@@ -575,16 +554,16 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
 
      @Override
      public void run2() throws CacheException {
-       Cache cache = PRQHelp.getCache();
+       Cache cache = getCache();
 
-       Query statusQuery = PRQHelp.getCache().getQueryService()
+       Query statusQuery = getCache().getQueryService()
            .newQuery("select * from /" + name + " p where p.ID > -1");
 
        while (!hooked) {
          Wait.pause(10);
        }
        try {
-         PRQHelp.getCache().getLogger().fine("Querying the region");
+         getCache().getLogger().fine("Querying the region");
          SelectResults results = (SelectResults)statusQuery.execute();
          assertEquals(100, results.size());
        } catch (Exception e) {
@@ -610,7 +589,7 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
    vm0.invoke(new CacheSerializableRunnable("Create local region with synchronous index maintenance") {
      @Override
      public void run2() throws CacheException {
-       Cache cache = PRQHelp.getCache();
+       Cache cache = getCache();
        Region partitionRegion = null;
        IndexManager.testHook = null;
        try {
@@ -644,10 +623,8 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
 
      @Override
      public void run2() throws CacheException {
-       Cache cache = PRQHelp.getCache();
-
        // Do a put in region.
-       Region r = PRQHelp.getCache().getRegion(name);
+       Region r = getCache().getRegion(name);
 
        for (int i=0; i<100; i++) {
          r.put(i, new PortfolioData(i));
@@ -657,7 +634,7 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
        IndexManager.testHook = new IndexManagerNoWaitTestHook();
 
        // Destroy one of the values.
-       PRQHelp.getCache().getLogger().fine("Destroying the value");
+       getCache().getLogger().fine("Destroying the value");
        r.destroy(1);
 
        IndexManager.testHook = null;
@@ -668,16 +645,14 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
 
      @Override
      public void run2() throws CacheException {
-       Cache cache = PRQHelp.getCache();
-
-       Query statusQuery = PRQHelp.getCache().getQueryService()
+       Query statusQuery = getCache().getQueryService()
            .newQuery("select * from /" + name + " p where p.ID > -1");
 
        while (!hooked) {
          Wait.pause(10);
        }
        try {
-         PRQHelp.getCache().getLogger().fine("Querying the region");
+         getCache().getLogger().fine("Querying the region");
          SelectResults results = (SelectResults)statusQuery.execute();
          assertEquals(100, results.size());
        } catch (Exception e) {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/6fb84d96/geode-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/ConcurrentIndexUpdateWithInplaceObjectModFalseDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/ConcurrentIndexUpdateWithInplaceObjectModFalseDUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/ConcurrentIndexUpdateWithInplaceObjectModFalseDUnitTest.java
index 8034931..6a49628 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/ConcurrentIndexUpdateWithInplaceObjectModFalseDUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/ConcurrentIndexUpdateWithInplaceObjectModFalseDUnitTest.java
@@ -25,6 +25,8 @@ import org.junit.experimental.categories.Category;
 
 import com.gemstone.gemfire.cache.Cache;
 import com.gemstone.gemfire.cache.CacheException;
+import com.gemstone.gemfire.cache.CacheExistsException;
+import com.gemstone.gemfire.cache.CacheFactory;
 import com.gemstone.gemfire.cache.Region;
 import com.gemstone.gemfire.cache.query.Index;
 import com.gemstone.gemfire.cache.query.IndexStatistics;
@@ -64,10 +66,10 @@ import com.gemstone.gemfire.test.junit.categories.FlakyTest;
  */
 public class ConcurrentIndexUpdateWithInplaceObjectModFalseDUnitTest extends DistributedTestCase {
   
-  PRQueryDUnitHelper helper = new PRQueryDUnitHelper("ConcurrentIndexUpdateWithoutWLDUnitTest");
+  PRQueryDUnitHelper helper = new PRQueryDUnitHelper();
   private static String regionName = "Portfolios";
   private int redundancy = 1;
-  
+
   // CompactRangeIndex
   private String indexName = "idIndex";
   private String indexedExpression = "ID";
@@ -81,6 +83,31 @@ public class ConcurrentIndexUpdateWithInplaceObjectModFalseDUnitTest extends Dis
 
   int stepSize = 10;
   private int totalDataSize = 50;
+  /**
+   * Runs {@link #getAvailableCacheElseCreateCache()} inside each given dunit VM so
+   * every VM has a Cache registered with PRQueryDUnitHelper before the test body runs.
+   */
+  public void setCacheInVMs(VM... vms) {
+    for (VM vm : vms) {
+      vm.invoke(() -> getAvailableCacheElseCreateCache());
+    }
+  }
+  /**
+   * Reuses this VM's existing GemFireCacheImpl instance if one exists, otherwise
+   * creates a new cache, and hands the result to PRQueryDUnitHelper.setCache().
+   * Synchronized on the test class so concurrent invocations inside one VM cannot
+   * race on cache creation.
+   */
+  private final void getAvailableCacheElseCreateCache() {
+    synchronized(ConcurrentIndexUpdateWithInplaceObjectModFalseDUnitTest.class) {
+      try {
+        Cache newCache = GemFireCacheImpl.getInstance();
+        if(null == newCache) {
+          // Per the property name, keep the distributed system connected when this
+          // cache is later closed; the flag is cleared again in the finally block.
+          System.setProperty("gemfire.DISABLE_DISCONNECT_DS_ON_CACHE_CLOSE", "true");
+          newCache = CacheFactory.create(getSystem());
+        }
+        PRQueryDUnitHelper.setCache(newCache);
+      } catch (CacheExistsException e) {
+        // Unexpected: getInstance() returned null yet create() found a cache.
+        Assert.fail("the cache already exists", e); // TODO: remove error handling
+      } catch (RuntimeException ex) {
+        throw ex;
+      } catch (Exception ex) {
+        Assert.fail("Checked exception while initializing cache??", ex);
+      } finally {
+        System.clearProperty("gemfire.DISABLE_DISCONNECT_DS_ON_CACHE_CLOSE");
+      }
+    }
+  }
 
   /**
    * @param name
@@ -132,8 +159,8 @@ public class ConcurrentIndexUpdateWithInplaceObjectModFalseDUnitTest extends Dis
     // Create a Local Region.
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
-    
-    vm0.invoke(helper.getCacheSerializableRunnableForReplicatedRegionCreation(regionName, Portfolio.class));
+    setCacheInVMs(vm0);
+    vm0.invoke(helper.getCacheSerializableRunnableForReplicatedRegionCreation(regionName));
     
     vm0.invoke(helper.getCacheSerializableRunnableForPRIndexCreate(regionName, indexName, indexedExpression, fromClause, alias));
     
@@ -177,10 +204,9 @@ public class ConcurrentIndexUpdateWithInplaceObjectModFalseDUnitTest extends Dis
   public void testRangeIndex() {
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
-    VM vm1 = host.getVM(1);
-    
-    vm0.invoke(helper.getCacheSerializableRunnableForReplicatedRegionCreation(regionName, Portfolio.class));
-    
+    setCacheInVMs(vm0);
+    vm0.invoke(helper.getCacheSerializableRunnableForReplicatedRegionCreation(regionName));
+
     vm0.invoke(helper.getCacheSerializableRunnableForPRIndexCreate(regionName, rindexName, rindexedExpression, rfromClause, ralias));
     
     AsyncInvocation[] asyncInvs = new AsyncInvocation[2];
@@ -209,7 +235,7 @@ public class ConcurrentIndexUpdateWithInplaceObjectModFalseDUnitTest extends Dis
     VM vm1 = host.getVM(1);
     VM vm2 = host.getVM(2);
     VM vm3 = host.getVM(3);    
-
+    setCacheInVMs(vm0,vm1, vm2, vm3);
     vm0.invoke(helper.getCacheSerializableRunnableForPRAccessorCreate(regionName, redundancy, Portfolio.class));
     
     vm1.invoke(helper.getCacheSerializableRunnableForPRCreate(regionName, redundancy, Portfolio.class));
@@ -272,7 +298,7 @@ public class ConcurrentIndexUpdateWithInplaceObjectModFalseDUnitTest extends Dis
     VM vm1 = host.getVM(1);
     VM vm2 = host.getVM(2);
     VM vm3 = host.getVM(3);    
-
+    setCacheInVMs(vm0, vm1, vm2, vm3);
     vm0.invoke(helper.getCacheSerializableRunnableForPRAccessorCreate(regionName, redundancy, Portfolio.class));
     
     vm1.invoke(helper.getCacheSerializableRunnableForPRCreate(regionName, redundancy, Portfolio.class));

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/6fb84d96/geode-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/ConcurrentIndexUpdateWithoutWLDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/ConcurrentIndexUpdateWithoutWLDUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/ConcurrentIndexUpdateWithoutWLDUnitTest.java
index 28a8f77..7aa3307 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/ConcurrentIndexUpdateWithoutWLDUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/ConcurrentIndexUpdateWithoutWLDUnitTest.java
@@ -24,6 +24,8 @@ import java.util.Collection;
 
 import com.gemstone.gemfire.cache.Cache;
 import com.gemstone.gemfire.cache.CacheException;
+import com.gemstone.gemfire.cache.CacheExistsException;
+import com.gemstone.gemfire.cache.CacheFactory;
 import com.gemstone.gemfire.cache.Region;
 import com.gemstone.gemfire.cache.query.Index;
 import com.gemstone.gemfire.cache.query.IndexStatistics;
@@ -62,7 +64,7 @@ import com.gemstone.gemfire.test.dunit.ThreadUtils;
 public class ConcurrentIndexUpdateWithoutWLDUnitTest extends
     DistributedTestCase {
   
-  PRQueryDUnitHelper helper = new PRQueryDUnitHelper("ConcurrentIndexUpdateWithoutWLDUnitTest");
+  PRQueryDUnitHelper helper = new PRQueryDUnitHelper();
   private static String regionName = "Portfolios";
   private int redundancy = 1;
   
@@ -87,6 +89,31 @@ public class ConcurrentIndexUpdateWithoutWLDUnitTest extends
     super(name);
   }
 
+  /**
+   * Runs {@link #getAvailableCacheElseCreateCache()} inside each given dunit VM so
+   * every VM has a Cache registered with PRQueryDUnitHelper before the test body runs.
+   */
+  public void setCacheInVMs(VM... vms) {
+    for (VM vm : vms) {
+      vm.invoke(() -> getAvailableCacheElseCreateCache());
+    }
+  }
+  /**
+   * Reuses this VM's existing GemFireCacheImpl instance if one exists, otherwise
+   * creates a new cache, and hands the result to PRQueryDUnitHelper.setCache().
+   */
+  private final void getAvailableCacheElseCreateCache() {
+    // Lock on this test's own class. The original locked on
+    // ConcurrentIndexUpdateWithInplaceObjectModFalseDUnitTest.class — an apparent
+    // copy-paste slip that coupled this test's cache setup to an unrelated class.
+    synchronized(ConcurrentIndexUpdateWithoutWLDUnitTest.class) {
+      try {
+        Cache newCache = GemFireCacheImpl.getInstance();
+        if(null == newCache) {
+          // Per the property name, keep the distributed system connected when this
+          // cache is later closed; the flag is cleared again in the finally block.
+          System.setProperty("gemfire.DISABLE_DISCONNECT_DS_ON_CACHE_CLOSE", "true");
+          newCache = CacheFactory.create(getSystem());
+        }
+        PRQueryDUnitHelper.setCache(newCache);
+      } catch (CacheExistsException e) {
+        // Unexpected: getInstance() returned null yet create() found a cache.
+        Assert.fail("the cache already exists", e); // TODO: remove error handling
+      } catch (RuntimeException ex) {
+        throw ex;
+      } catch (Exception ex) {
+        Assert.fail("Checked exception while initializing cache??", ex);
+      } finally {
+        System.clearProperty("gemfire.DISABLE_DISCONNECT_DS_ON_CACHE_CLOSE");
+      }
+    }
+  }
   /**
    * Tear down a PartitionedRegionTestCase by cleaning up the existing cache
    * (mainly because we want to destroy any existing PartitionedRegions)
@@ -109,8 +136,8 @@ public class ConcurrentIndexUpdateWithoutWLDUnitTest extends
     // Create a Local Region.
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
-    
-    vm0.invoke(helper.getCacheSerializableRunnableForReplicatedRegionCreation(regionName, Portfolio.class));
+    setCacheInVMs(vm0);
+    vm0.invoke(helper.getCacheSerializableRunnableForReplicatedRegionCreation(regionName));
     
     vm0.invoke(helper.getCacheSerializableRunnableForPRIndexCreate(regionName, indexName, indexedExpression, fromClause, alias));
     
@@ -138,8 +165,8 @@ public class ConcurrentIndexUpdateWithoutWLDUnitTest extends
     // Create a Local Region.
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(1);
-    
-    vm0.invoke(helper.getCacheSerializableRunnableForReplicatedRegionCreation(regionName, Portfolio.class));
+    setCacheInVMs(vm0);
+    vm0.invoke(helper.getCacheSerializableRunnableForReplicatedRegionCreation(regionName));
  
     ArrayList<String> names = new ArrayList<String>();
     names.add(indexName);
@@ -193,9 +220,8 @@ public class ConcurrentIndexUpdateWithoutWLDUnitTest extends
   public void testRangeIndex() {
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
-    VM vm1 = host.getVM(1);
-    
-    vm0.invoke(helper.getCacheSerializableRunnableForReplicatedRegionCreation(regionName, Portfolio.class));
+    setCacheInVMs(vm0);
+    vm0.invoke(helper.getCacheSerializableRunnableForReplicatedRegionCreation(regionName));
     
     vm0.invoke(helper.getCacheSerializableRunnableForPRIndexCreate(regionName, rindexName, rindexedExpression, rfromClause, ralias));
     
@@ -225,7 +251,7 @@ public class ConcurrentIndexUpdateWithoutWLDUnitTest extends
     VM vm1 = host.getVM(1);
     VM vm2 = host.getVM(2);
     VM vm3 = host.getVM(3);    
-
+    setCacheInVMs(vm0, vm1, vm2, vm3);
     vm0.invoke(helper.getCacheSerializableRunnableForPRAccessorCreate(regionName, redundancy, Portfolio.class));
     
     vm1.invoke(helper.getCacheSerializableRunnableForPRCreate(regionName, redundancy, Portfolio.class));
@@ -287,7 +313,7 @@ public class ConcurrentIndexUpdateWithoutWLDUnitTest extends
     VM vm1 = host.getVM(1);
     VM vm2 = host.getVM(2);
     VM vm3 = host.getVM(3);    
-
+    setCacheInVMs(vm0, vm1, vm2, vm3);
     vm0.invoke(helper.getCacheSerializableRunnableForPRAccessorCreate(regionName, redundancy, Portfolio.class));
     
     vm1.invoke(helper.getCacheSerializableRunnableForPRCreate(regionName, redundancy, Portfolio.class));
@@ -348,7 +374,7 @@ public class ConcurrentIndexUpdateWithoutWLDUnitTest extends
     VM vm1 = host.getVM(1);
     VM vm2 = host.getVM(2);
     VM vm3 = host.getVM(3);    
-
+    setCacheInVMs(vm0, vm1, vm2, vm3);
     vm0.invoke(helper.getCacheSerializableRunnableForPRAccessorCreate(regionName, redundancy, Portfolio.class));
     
     vm1.invoke(helper.getCacheSerializableRunnableForPRCreate(regionName, redundancy, Portfolio.class));

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/6fb84d96/geode-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/InitializeIndexEntryDestroyQueryDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/InitializeIndexEntryDestroyQueryDUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/InitializeIndexEntryDestroyQueryDUnitTest.java
index 0311f38..6e064f1 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/InitializeIndexEntryDestroyQueryDUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/InitializeIndexEntryDestroyQueryDUnitTest.java
@@ -16,6 +16,8 @@
  */
 package com.gemstone.gemfire.cache.query.internal.index;
 
+import static com.gemstone.gemfire.cache.query.Utils.createPortfolioData;
+
 import java.util.Arrays;
 
 import org.junit.experimental.categories.Category;
@@ -52,7 +54,7 @@ import com.gemstone.gemfire.test.junit.categories.FlakyTest;
  */
 public class InitializeIndexEntryDestroyQueryDUnitTest extends CacheTestCase {
 
-  PRQueryDUnitHelper PRQHelp = new PRQueryDUnitHelper("");
+  PRQueryDUnitHelper PRQHelp = new PRQueryDUnitHelper();
 
   String name;
 
@@ -71,16 +73,21 @@ public class InitializeIndexEntryDestroyQueryDUnitTest extends CacheTestCase {
   public InitializeIndexEntryDestroyQueryDUnitTest(String name) {
     super(name);
   }
-
+  /**
+   * Registers this test's cache with PRQueryDUnitHelper inside each given dunit VM,
+   * so the helper's runnables can obtain the cache without inheriting from the
+   * PartitionedRegion test base class.
+   */
+  public void setCacheInVMs(VM... vms) {
+    for (VM vm : vms) {
+      vm.invoke(() -> PRQueryDUnitHelper.setCache(getCache()));
+    }
+  }
   public void testAsyncIndexInitDuringEntryDestroyAndQuery() {
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
+    setCacheInVMs(vm0);
     name = "PartionedPortfolios";
     //Create Local Region
     vm0.invoke(new CacheSerializableRunnable("Create local region with asynchronous index maintenance") {
       @Override
       public void run2() throws CacheException {
-        Cache cache = PRQHelp.getCache();
+        Cache cache = getCache();
         Region localRegion = null;
         try {
           AttributesFactory attr = new AttributesFactory();
@@ -100,7 +107,7 @@ public class InitializeIndexEntryDestroyQueryDUnitTest extends CacheTestCase {
     });
 
 
-    final PortfolioData[] portfolio = PRQHelp.createPortfolioData(cnt, cntDest);
+    final PortfolioData[] portfolio = createPortfolioData(cnt, cntDest);
     // Putting the data into the PR's created
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
         cnt, cntDest));
@@ -110,11 +117,9 @@ public class InitializeIndexEntryDestroyQueryDUnitTest extends CacheTestCase {
       @Override
       public void run2() throws CacheException {
 
-        Region r = PRQHelp.getCache().getRegion(name);
-
         for (int i=0; i<cntDest; i++) {
           //Create Index first to go in hook.
-          Cache cache = PRQHelp.getCache();
+          Cache cache = getCache();
           Index index = null;
           try {
             index = cache.getQueryService().createIndex("statusIndex", "p.status", "/"+name+" p");
@@ -126,7 +131,7 @@ public class InitializeIndexEntryDestroyQueryDUnitTest extends CacheTestCase {
 
           Wait.pause(100);
 
-          PRQHelp.getCache().getQueryService().removeIndex(index);
+          getCache().getQueryService().removeIndex(index);
 
           Wait.pause(100);
         }
@@ -138,22 +143,20 @@ public class InitializeIndexEntryDestroyQueryDUnitTest extends CacheTestCase {
 
       @Override
       public void run2() throws CacheException {
-        Cache cache = PRQHelp.getCache();
-
         // Do a put in region.
-        Region r = PRQHelp.getCache().getRegion(name);
+        Region r = getCache().getRegion(name);
 
         for (int i=0, j=0; i<1000; i++,j++) {
 
           PortfolioData p = (PortfolioData)r.get(j);
 
-          PRQHelp.getCache().getLogger().fine("Going to destroy the value" + p);
+          getCache().getLogger().fine("Going to destroy the value" + p);
           r.destroy(j);
 
           Wait.pause(100);
 
           //Put the value back again.
-          PRQHelp.getCache().getLogger().fine("Putting the value back" + p);
+          getCache().getLogger().fine("Putting the value back" + p);
           r.put(j, p);
 
           //Reset j
@@ -168,12 +171,10 @@ public class InitializeIndexEntryDestroyQueryDUnitTest extends CacheTestCase {
 
       @Override
       public void run2() throws CacheException {
-        Cache cache = PRQHelp.getCache();
-
         // Do a put in region.
-        Region r = PRQHelp.getCache().getRegion(name);
+        Region r = getCache().getRegion(name);
 
-        Query query = PRQHelp.getCache().getQueryService().newQuery("select * from /"+name+" p where p.status = 'active'");
+        Query query = getCache().getQueryService().newQuery("select * from /"+name+" p where p.status = 'active'");
 
         //Now run the query
         SelectResults results = null;
@@ -182,7 +183,7 @@ public class InitializeIndexEntryDestroyQueryDUnitTest extends CacheTestCase {
         for (int i=0; i<500; i++) {
 
           try {
-            PRQHelp.getCache().getLogger().fine("Querying the region");
+            getCache().getLogger().fine("Querying the region");
             results = (SelectResults)query.execute();
           } catch (Exception e) {
             e.printStackTrace();
@@ -212,13 +213,13 @@ public class InitializeIndexEntryDestroyQueryDUnitTest extends CacheTestCase {
   public void testAsyncIndexInitDuringEntryDestroyAndQueryOnPR() {
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
-
+    setCacheInVMs(vm0);
     name = "PartionedPortfoliosPR";
     //Create Local Region
     vm0.invoke(new CacheSerializableRunnable("Create local region with asynchronous index maintenance") {
       @Override
       public void run2() throws CacheException {
-        Cache cache = PRQHelp.getCache();
+        Cache cache = getCache();
         Region partitionRegion = null;
         try {
           AttributesFactory attr = new AttributesFactory();
@@ -237,7 +238,7 @@ public class InitializeIndexEntryDestroyQueryDUnitTest extends CacheTestCase {
     });
 
 
-    final PortfolioData[] portfolio = PRQHelp.createPortfolioData(cnt, cntDest);
+    final PortfolioData[] portfolio = createPortfolioData(cnt, cntDest);
     // Putting the data into the PR's created
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
         cnt, cntDest));
@@ -246,12 +247,9 @@ public class InitializeIndexEntryDestroyQueryDUnitTest extends CacheTestCase {
 
       @Override
       public void run2() throws CacheException {
-
-        Region r = PRQHelp.getCache().getRegion(name);
-
         for (int i=0; i<cntDest; i++) {
           //Create Index first to go in hook.
-          Cache cache = PRQHelp.getCache();
+          Cache cache = getCache();
           Index index = null;
           try {
             index = cache.getQueryService().createIndex("statusIndex", "p.status", "/"+name+" p");
@@ -261,11 +259,8 @@ public class InitializeIndexEntryDestroyQueryDUnitTest extends CacheTestCase {
           }
           assertNotNull(index);
 
-          //pause(100);
+          getCache().getQueryService().removeIndex(index);
 
-          PRQHelp.getCache().getQueryService().removeIndex(index);
-
-          //pause(100);
         }
       }
     });
@@ -275,22 +270,20 @@ public class InitializeIndexEntryDestroyQueryDUnitTest extends CacheTestCase {
 
       @Override
       public void run2() throws CacheException {
-        Cache cache = PRQHelp.getCache();
-
         // Do a put in region.
-        Region r = PRQHelp.getCache().getRegion(name);
+        Region r = getCache().getRegion(name);
 
         for (int i=0, j=0; i<1000; i++,j++) {
 
           PortfolioData p = (PortfolioData)r.get(j);
 
-          PRQHelp.getCache().getLogger().fine("Going to destroy the value" + p);
+          getCache().getLogger().fine("Going to destroy the value" + p);
           r.destroy(j);
 
           Wait.pause(20);
 
           //Put the value back again.
-          PRQHelp.getCache().getLogger().fine("Putting the value back" + p);
+          getCache().getLogger().fine("Putting the value back" + p);
           r.put(j, p);
 
           //Reset j
@@ -305,12 +298,8 @@ public class InitializeIndexEntryDestroyQueryDUnitTest extends CacheTestCase {
 
       @Override
       public void run2() throws CacheException {
-        Cache cache = PRQHelp.getCache();
-
         // Do a put in region.
-        Region r = PRQHelp.getCache().getRegion(name);
-
-        Query query = PRQHelp.getCache().getQueryService().newQuery("select * from /"+name+" p where p.status = 'active'");
+        Query query = getCache().getQueryService().newQuery("select * from /"+name+" p where p.status = 'active'");
 
         //Now run the query
         SelectResults results = null;
@@ -319,7 +308,7 @@ public class InitializeIndexEntryDestroyQueryDUnitTest extends CacheTestCase {
         for (int i=0; i<500; i++) {
 
           try {
-            PRQHelp.getCache().getLogger().fine("Querying the region");
+            getCache().getLogger().fine("Querying the region");
             results = (SelectResults)query.execute();
           } catch (Exception e) {
             e.printStackTrace(); // TODO: eats exceptions
@@ -348,13 +337,13 @@ public class InitializeIndexEntryDestroyQueryDUnitTest extends CacheTestCase {
   public void testConcurrentRemoveIndexAndQueryOnPR() {
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
-
+    setCacheInVMs(vm0);
     name = "PartionedPortfoliosPR";
     //Create Local Region
     vm0.invoke(new CacheSerializableRunnable("Create local region with asynchronous index maintenance") {
       @Override
       public void run2() throws CacheException {
-        Cache cache = PRQHelp.getCache();
+        Cache cache = getCache();
         Region partitionRegion = null;
         try {
           AttributesFactory attr = new AttributesFactory();
@@ -374,7 +363,7 @@ public class InitializeIndexEntryDestroyQueryDUnitTest extends CacheTestCase {
     });
 
 
-    final PortfolioData[] portfolio = PRQHelp.createPortfolioData(cnt, cntDest);
+    final PortfolioData[] portfolio = createPortfolioData(cnt, cntDest);
     // Putting the data into the PR's created
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio, cnt, cntDest));
 
@@ -383,10 +372,8 @@ public class InitializeIndexEntryDestroyQueryDUnitTest extends CacheTestCase {
       @Override
       public void run2() throws CacheException {
 
-        Region r = PRQHelp.getCache().getRegion(name);
-
           //Create Index first to go in hook.
-          Cache cache = PRQHelp.getCache();
+          Cache cache = getCache();
           Index sindex = null;
           Index iindex = null;
           Index pkindex = null;
@@ -408,19 +395,15 @@ public class InitializeIndexEntryDestroyQueryDUnitTest extends CacheTestCase {
 
       @Override
       public void run2() throws CacheException {
-        Cache cache = PRQHelp.getCache();
-
         // Do a put in region.
-        Region r = PRQHelp.getCache().getRegion(name);
-
-        Query query = PRQHelp.getCache().getQueryService().newQuery("select * from /"+name+" p where p.status = 'active' and p.ID > 0 and p.pk != ' ' ");
+        Query query = getCache().getQueryService().newQuery("select * from /"+name+" p where p.status = 'active' and p.ID > 0 and p.pk != ' ' ");
         //Now run the query
         SelectResults results = null;
 
         for (int i=0; i<10; i++) {
 
           try {
-            PRQHelp.getCache().getLogger().fine("Querying the region with " + query);
+            getCache().getLogger().fine("Querying the region with " + query);
             results = (SelectResults)query.execute();
           } catch (Exception e) {
             Assert.fail("Query: " + query + " execution failed with exception", e);
@@ -440,15 +423,12 @@ public class InitializeIndexEntryDestroyQueryDUnitTest extends CacheTestCase {
       @Override
       public void run2() throws CacheException {
 
-        Region r = PRQHelp.getCache().getRegion(name);
+        Region r = getCache().getRegion(name);
 
           //Create Index first to go in hook.
-          Cache cache = PRQHelp.getCache();
-       
-          PRQHelp.getCache().getQueryService().removeIndexes(r);
+          getCache().getQueryService().removeIndexes(r);
+
 
-          //pause(100);
-        
       }
     });
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/6fb84d96/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRBasicIndexCreationDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRBasicIndexCreationDUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRBasicIndexCreationDUnitTest.java
index 3ce1952..2cf8c3c 100755
--- a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRBasicIndexCreationDUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRBasicIndexCreationDUnitTest.java
@@ -16,18 +16,24 @@
  */
 package com.gemstone.gemfire.cache.query.partitioned;
 
+import static com.gemstone.gemfire.cache.query.Utils.*;
+
+
 import java.util.Collection;
 
 import com.gemstone.gemfire.cache.Cache;
 import com.gemstone.gemfire.cache.CacheException;
 import com.gemstone.gemfire.cache.query.Index;
+import com.gemstone.gemfire.cache.query.IndexNameConflictException;
 import com.gemstone.gemfire.cache.query.data.Portfolio;
 import com.gemstone.gemfire.cache.query.data.PortfolioData;
 import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
+import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
 import com.gemstone.gemfire.internal.cache.PartitionedRegionDUnitTestCase;
 import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
 import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.ThreadUtils;
@@ -50,11 +56,23 @@ public class PRBasicIndexCreationDUnitTest extends
     super(name);
   }
 
+  /**
+   * In each given dunit VM, points GemFireCacheImpl.testCacheXml at the named
+   * cache-xml file (resolved via PRQHelp.findFile) before creating the cache, then
+   * registers the resulting cache with PRQueryDUnitHelper. Used by the
+   * XML-driven index-creation tests.
+   */
+  public void setCacheInVMsUsingXML(String xmlFile, VM... vms) {
+    for (VM vm : vms) {
+      vm.invoke(() -> GemFireCacheImpl.testCacheXml = PRQHelp.findFile(xmlFile));
+      vm.invoke(() -> PRQueryDUnitHelper.setCache(getCache()));
+    }
+  }
+
+  /**
+   * Registers this test's cache with PRQueryDUnitHelper inside each given dunit VM,
+   * so the helper's runnables can obtain the cache without inheriting from the
+   * PartitionedRegion test base class.
+   */
+  public void setCacheInVMs(VM... vms) {
+    for (VM vm : vms) {
+      vm.invoke(() -> PRQueryDUnitHelper.setCache(getCache()));
+    }
+  }
   // int totalNumBuckets = 131;
 
   int queryTestCycle = 10;
 
-  PRQueryDUnitHelper PRQHelp = new PRQueryDUnitHelper("");
+  PRQueryDUnitHelper PRQHelp = new PRQueryDUnitHelper();
 
   final String name = "PartionedPortfolios";
 
@@ -76,27 +94,23 @@ public class PRBasicIndexCreationDUnitTest extends
     VM vm1 = host.getVM(1);
     VM vm2 = host.getVM(2);
     VM vm3 = host.getVM(3);
-
+    setCacheInVMs(vm0,vm1,vm2,vm3);
     LogWriterUtils.getLogWriter().info(
         "PRBasicIndexCreationDUnitTest.testPRBasicIndexCreate started ....");
-
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRAccessorCreate(name,
-        redundancy));
-    // Creating local region on vm0 to compare the results of query.
-    // vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(localName,
-    // Scope.DISTRIBUTED_ACK, redundancy));
+        redundancy, PortfolioData.class));
 
     // Creating the Datastores Nodes in the VM1.
     LogWriterUtils.getLogWriter()
         .info("PRBasicIndexCreationDUnitTest : creating all the prs ");
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm2.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
 
-    final PortfolioData[] portfolio = PRQHelp.createPortfolioData(cnt, cntDest);
+    final PortfolioData[] portfolio = createPortfolioData(cnt, cntDest);
     // Putting the data into the PR's created
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
         cnt, cntDest));
@@ -126,21 +140,20 @@ public class PRBasicIndexCreationDUnitTest extends
     VM vm1 = host.getVM(1);
     VM vm2 = host.getVM(2);
     VM vm3 = host.getVM(3);
+    setCacheInVMs(vm0,vm1,vm2,vm3);
 
     LogWriterUtils.getLogWriter().info(
         "PRBasicIndexCreation.testPRMultiIndexCreation Test Started");
-
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRAccessorCreate(name,
-        redundancy));
-
+        redundancy, PortfolioData.class));
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm2.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
 
-    final PortfolioData[] portfolio = PRQHelp.createPortfolioData(cnt, cntDest);
+    final PortfolioData[] portfolio = createPortfolioData(cnt, cntDest);
     // Putting the data into the PR's created
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
         cnt, cntDest));
@@ -168,21 +181,21 @@ public class PRBasicIndexCreationDUnitTest extends
     VM vm1 = host.getVM(1);
     VM vm2 = host.getVM(2);
     VM vm3 = host.getVM(3);
-
+    setCacheInVMs(vm0,vm1,vm2,vm3);
     LogWriterUtils.getLogWriter().info(
         "PRBasicIndexCreation.testPRMultiIndexCreation Test Started");
 
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRAccessorCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
 
     vm1.invoke(PRQHelp
-        .getCacheSerializableRunnableForPRCreate(name, redundancy));
+        .getCacheSerializableRunnableForPRCreate(name, redundancy, PortfolioData.class));
     vm2.invoke(PRQHelp
-        .getCacheSerializableRunnableForPRCreate(name, redundancy));
+        .getCacheSerializableRunnableForPRCreate(name, redundancy, PortfolioData.class));
     vm3.invoke(PRQHelp
-        .getCacheSerializableRunnableForPRCreate(name, redundancy));
+        .getCacheSerializableRunnableForPRCreate(name, redundancy, PortfolioData.class));
 
-    final PortfolioData[] portfolio = PRQHelp.createPortfolioData(cnt, cntDest);
+    final PortfolioData[] portfolio = createPortfolioData(cnt, cntDest);
     // Putting the data into the PR's created
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
         cnt, cntDest));
@@ -241,14 +254,13 @@ public class PRBasicIndexCreationDUnitTest extends
    */
   public void testCreatePartitionedIndexThroughXML() throws Exception
   {
-
+    IgnoredException ie = IgnoredException.addIgnoredException(IndexNameConflictException.class.getName());
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
     VM vm1 = host.getVM(1);
-//    VM vm2 = host.getVM(2);
-//    VM vm3 = host.getVM(3);
-    // closeAllCache();
+    closeCache();
     final String fileName = "PRIndexCreation.xml";
+    setCacheInVMsUsingXML(fileName, vm0, vm1);
     LogWriterUtils.getLogWriter().info(
         "PRBasicIndexCreation.testCreatePartitionedIndexThroughXML started");
     LogWriterUtils.getLogWriter().info(
@@ -256,9 +268,10 @@ public class PRBasicIndexCreationDUnitTest extends
     LogWriterUtils.getLogWriter().info(
         "Starting a pr asynchronously using an xml file name : " + fileName);
     AsyncInvocation asyInvk0 = vm0.invokeAsync(PRQHelp
-        .getCacheSerializableRunnableForPRCreateThrougXML(name, fileName));
+        .getCacheSerializableRunnableForPRCreate(name));
     AsyncInvocation asyInvk1 = vm1.invokeAsync(PRQHelp
-        .getCacheSerializableRunnableForPRCreateThrougXML(name, fileName));
+        .getCacheSerializableRunnableForPRCreate(name));
+
     ThreadUtils.join(asyInvk1, 30 * 1000);
     if (asyInvk1.exceptionOccurred()) {
       Assert.fail("asyInvk1 failed", asyInvk1.getException());
@@ -267,7 +280,9 @@ public class PRBasicIndexCreationDUnitTest extends
     if (asyInvk0.exceptionOccurred()) {
       Assert.fail("asyInvk0 failed", asyInvk0.getException());
     }
-    // printing all the indexes are created.
+
+    ie.remove();
+//    // printing all the indexes are created.
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForIndexCreationCheck(name));
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForIndexCreationCheck(name));
     LogWriterUtils.getLogWriter().info(
@@ -288,20 +303,18 @@ public class PRBasicIndexCreationDUnitTest extends
     VM vm1 = host.getVM(1);
     VM vm2 = host.getVM(2);
     VM vm3 = host.getVM(3);
+    setCacheInVMs(vm0,vm1,vm2,vm3);
 
-    // final String fileName = "PRIndexCreation.xml";
-    // vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreateThrougXML(name,
-    // fileName));
     LogWriterUtils.getLogWriter()
         .info(
             "PRBasicIndexCreationDUnitTest.testCreatePartitionedRegionThroughXMLAndAPI started ");
     // creating all the prs
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRIndexCreate(name,
         "PrIndexOnStatus", "p.status",null, "p"));
     vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRIndexCreate(name,
@@ -311,9 +324,9 @@ public class PRBasicIndexCreationDUnitTest extends
         "PrIndexOnPKID", "p.pkid",null, "p"));
 //  adding a new node to an already existing system.
     vm2.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     // putting some data in.
-    final PortfolioData[] portfolio = PRQHelp.createPortfolioData(cnt, cntDest);
+    final PortfolioData[] portfolio = createPortfolioData(cnt, cntDest);
     // Putting the data into the PR's created
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
         cnt, cntDest));
@@ -334,40 +347,27 @@ public class PRBasicIndexCreationDUnitTest extends
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
     VM vm1 = host.getVM(1);
-//    VM vm2 = host.getVM(2);
     VM vm3 = host.getVM(3);
-
-    // final String fileName = "PRIndexCreation.xml";
-    // vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreateThrougXML(name,
-    // fileName));
+    setCacheInVMs(vm0,vm1,vm3);
     LogWriterUtils.getLogWriter()
         .info(
             "PRBasicIndexCreationDUnitTest.testCreatePartitionedIndexWithNoAliasAfterPuts started ");
     // creating all the prs
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRIndexCreate(name,
         "PrIndexOnStatus", "status",null, ""));
-    //vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRIndexCreate(name,
-    //    "PrIndexOnId", "p.ID", "p"));
-
-    //vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRIndexCreate(name,
-    //    "PrIndexOnPKID", "p.pkid", "p"));
-//  adding a new node to an already existing system.
-    //vm2.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-    //    Scope.DISTRIBUTED_ACK, redundancy));
     // putting some data in.
-    final PortfolioData[] portfolio = PRQHelp.createPortfolioData(cnt, cntDest);
+    final PortfolioData[] portfolio = createPortfolioData(cnt, cntDest);
     // Putting the data into the PR's created
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
         cnt, cntDest));
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForIndexCreationCheck(name));
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForIndexCreationCheck(name));
-  //  vm2.invoke(PRQHelp.getCacheSerializableRunnableForIndexCreationCheck(name));
     vm3.invoke(PRQHelp.getCacheSerializableRunnableForIndexCreationCheck(name));
   }
   
@@ -380,74 +380,28 @@ public class PRBasicIndexCreationDUnitTest extends
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
     VM vm1 = host.getVM(1);
-//    VM vm2 = host.getVM(2);
     VM vm3 = host.getVM(3);
-
-    // final String fileName = "PRIndexCreation.xml";
-    // vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreateThrougXML(name,
-    // fileName));
+    setCacheInVMs(vm0,vm1,vm3);
     LogWriterUtils.getLogWriter()
         .info(
             "PRBasicIndexCreationDUnitTest.testCreatePartitionedIndexWithNoAliasBeforePuts started ");
     // creating all the prs
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
-
-    // vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRIndexCreate(name,
-    // "PrIndexOnId", "p.ID", "p"));
+        redundancy, PortfolioData.class));
 
-    // vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRIndexCreate(name,
-    // "PrIndexOnPKID", "p.pkid", "p"));
-    // adding a new node to an already existing system.
-    // vm2.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-    // Scope.DISTRIBUTED_ACK, redundancy));
     // putting some data in.
-    final PortfolioData[] portfolio = PRQHelp.createPortfolioData(cnt, cntDest);
+    final PortfolioData[] portfolio = createPortfolioData(cnt, cntDest);
     // Putting the data into the PR's created
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
         cnt, cntDest));
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRIndexCreate(name,
         "PrIndexOnStatus", "status",null, ""));
-    /*
-    vm1.invoke(new CacheSerializableRunnable("IndexCreationOnPosition") {
-      public void run2(){
-        try {
-          Cache cache = getCache();
-          QueryService qs = cache.getQueryService();
-          Region region = cache.getRegion(name);
-          LogWriter logger = cache.getLogger();
-         // logger.info("Test Creating index with Name : [ "+indexName+" ] " +
-         //               "IndexedExpression : [ "+indexedExpression+" ] Alias : [ "+alias+" ] FromClause : [ "+region.getFullPath() + " " + alias+" ] " );
-          Index parIndex = qs.createIndex("IndexOnPotionMktValue", IndexType.FUNCTIONAL, "pVal.mktValue"
-              ,region.getFullPath()+" pf, pf.positions pVal TYPE Position", "import parReg.\"query\".Position;");
-          logger.info(
-              "Index creted on partitioned region : " + parIndex);
-          logger.info(
-              "Number of buckets indexed in the partitioned region locally : "
-                  + "" + ((PartitionedIndex)parIndex).getNumberOfIndexedBucket()
-                  + " and remote buckets indexed : "
-                  + ((PartitionedIndex)parIndex).getNumRemoteBucketsIndexed());
-          /*
-           * assertEquals("Max num of buckets in the partiotion regions and
-           * the " + "buckets indexed should be equal",
-           * ((PartitionedRegion)region).getTotalNumberOfBuckets(),
-           * (((PartionedIndex)parIndex).getNumberOfIndexedBucket()+((PartionedIndex)parIndex).getNumRemtoeBucketsIndexed()));
-           * should put all the assetion in a seperate function.
-           */ 
-       /* } 
-        catch (Exception ex) {
-          fail("Creating Index in this vm failed : ", ex);
-        }
-      
-      }
-    });*/
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForIndexCreationCheck(name));
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForIndexCreationCheck(name));
-    // vm2.invoke(PRQHelp.getCacheSerializableRunnableForIndexCreationCheck(name));
     vm3.invoke(PRQHelp.getCacheSerializableRunnableForIndexCreationCheck(name));
   } 
   
@@ -461,38 +415,35 @@ public class PRBasicIndexCreationDUnitTest extends
     VM vm1 = host.getVM(1);
     VM vm2 = host.getVM(2);
     VM vm3 = host.getVM(3);
-
-    // final String fileName = "PRIndexCreation.xml";
-    // vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreateThrougXML(name,
-    // fileName));
+    setCacheInVMs(vm0,vm1,vm2,vm3);
     LogWriterUtils.getLogWriter()
         .info(
             "PRBasicIndexCreationDUnitTest.testPartitionedIndexUsageWithPRQuery started ");
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm2.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRIndexCreate(name,
         "PrIndexOnId", "p.ID",null, "p"));
-    final PortfolioData[] portfolio = PRQHelp.createPortfolioData(cnt, cntDest);
+    final PortfolioData[] portfolio = createPortfolioData(cnt, cntDest);
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
         cnt, cntDest));
     vm0.invoke(PRQHelp
-        .getCacheSerializableRunnableForLocalRegionCreation(localName));
+        .getCacheSerializableRunnableForLocalRegionCreation(localName, PortfolioData.class));
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(localName,
         portfolio, cnt, cntDest));
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRQueryAndCompareResults(
         name, localName));
     // validation on index usage with queries over a pr
-    vm0.invoke(PRQHelp.getCacheSerializableRunnableForIndexUsageCheck(name));
-    vm1.invoke(PRQHelp.getCacheSerializableRunnableForIndexUsageCheck(name));
-    vm2.invoke(PRQHelp.getCacheSerializableRunnableForIndexUsageCheck(name));
-    vm3.invoke(PRQHelp.getCacheSerializableRunnableForIndexUsageCheck(name));
+    vm0.invoke(PRQHelp.getCacheSerializableRunnableForIndexUsageCheck());
+    vm1.invoke(PRQHelp.getCacheSerializableRunnableForIndexUsageCheck());
+    vm2.invoke(PRQHelp.getCacheSerializableRunnableForIndexUsageCheck());
+    vm3.invoke(PRQHelp.getCacheSerializableRunnableForIndexUsageCheck());
     LogWriterUtils.getLogWriter()
     .info(
         "PRBasicIndexCreationDUnitTest.testPartitionedIndexUsageWithPRQuery done ");
@@ -507,32 +458,28 @@ public class PRBasicIndexCreationDUnitTest extends
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
     VM vm1 = host.getVM(1);
+    setCacheInVMs(vm0,vm1);
 
-    // final String fileName = "PRIndexCreation.xml";
-    // vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreateThrougXML(name,
-    // fileName));
-    
     int redundancy = 1;
     LogWriterUtils.getLogWriter()
         .info(
             "PRBasicIndexCreationDUnitTest.testPartitionedIndexCreationDuringPersistentRecovery started ");
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPersistentPRCreate(name,
-        redundancy, PRQHelp.valueConstraint));
+        redundancy, PortfolioData.class));
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPersistentPRCreate(name,
-        redundancy, PRQHelp.valueConstraint));
-//    vm2.invoke(PRQHelp.getCacheSerializableRunnableForPersistentPRCreate(name,
-//        redundancy, PRQHelp.valueConstraint));
-    
+        redundancy, PortfolioData.class));
+
     
-    final PortfolioData[] portfolio = PRQHelp.createPortfolioData(cnt, cntDest);
+    final PortfolioData[] portfolio = createPortfolioData(cnt, cntDest);
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
         cnt, cntDest));
     
     
     //Restart a single member
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForCloseCache());
+    setCacheInVMs(vm0);
     AsyncInvocation regionCreateFuture = vm0.invokeAsync(PRQHelp.getCacheSerializableRunnableForPersistentPRCreate(name,
-        redundancy, PRQHelp.valueConstraint));
+        redundancy, PortfolioData.class));
     
     //Ok, I want to do this in parallel
     AsyncInvocation indexCreateFuture = vm1.invokeAsync(PRQHelp.getCacheSerializableRunnableForPRIndexCreate(name,
@@ -543,15 +490,11 @@ public class PRBasicIndexCreationDUnitTest extends
     indexCreateFuture.getResult(20 * 1000);
     
     vm0.invoke(PRQHelp
-        .getCacheSerializableRunnableForLocalRegionCreation(localName));
+        .getCacheSerializableRunnableForLocalRegionCreation(localName,PortfolioData.class));
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(localName,
         portfolio, cnt, cntDest));
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRQueryAndCompareResults(
         name, localName));
-    // validation on index usage with queries over a pr
-    //The indexes may not have been completely created yet, because the buckets
-    //may still be recovering from disk.
-//    vm0.invoke(PRQHelp.getCacheSerializableRunnableForIndexUsageCheck(name));
     LogWriterUtils.getLogWriter()
     .info(
         "PRBasicIndexCreationDUnitTest.testPartitionedIndexCreationDuringPersistentRecovery done ");
@@ -569,26 +512,26 @@ public class PRBasicIndexCreationDUnitTest extends
     VM vm1 = host.getVM(1);
     VM vm2 = host.getVM(2);
     VM vm3 = host.getVM(3);
-
+    setCacheInVMs(vm0,vm1,vm2,vm3);
     LogWriterUtils.getLogWriter()
         .info(
             "PRBasicIndexCreationDUnitTest.testPartitionedQueryWithIndexOnIdBug37089 started ");
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm2.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRIndexCreate(name,
         "PrIndexOnId", "p.ID",null, "p"));
-    final PortfolioData[] portfolio = PRQHelp.createPortfolioData(cnt, cntDest);
+    final PortfolioData[] portfolio = createPortfolioData(cnt, cntDest);
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
         cnt, cntDest));
     vm0.invoke(PRQHelp
-        .getCacheSerializableRunnableForLocalRegionCreation(localName));
+        .getCacheSerializableRunnableForLocalRegionCreation(localName, PortfolioData.class));
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(localName,
         portfolio, cnt, cntDest));
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRQueryAndCompareResults(
@@ -608,9 +551,7 @@ public class PRBasicIndexCreationDUnitTest extends
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
     VM vm1 = host.getVM(1);
-//    VM vm2 = host.getVM(2);
-//    VM vm3 = host.getVM(3);
-    // closeAllCache();
+    setCacheInVMs(vm0,vm1);
     final String fileName = "PRIndexCreation.xml";
     LogWriterUtils.getLogWriter().info(
         "PRBasicIndexCreation.testCreatePartitionedIndexThroughXML started");
@@ -618,35 +559,12 @@ public class PRBasicIndexCreationDUnitTest extends
         "Starting and initializing partitioned regions and indexes using xml");
     LogWriterUtils.getLogWriter().info(
         "Starting a pr asynchronously using an xml file name : " + fileName);
-   // AsyncInvocation asyInvk0 = vm0.invokeAsync(PRQHelp
-   //     .getCacheSerializableRunnableForPRCreateThrougXML(name, fileName));
-   // AsyncInvocation asyInvk1 = vm1.invokeAsync(PRQHelp
-   //     .getCacheSerializableRunnableForPRCreateThrougXML(name, fileName));
-   // asyInvk1.join();
-   // if (asyInvk1.exceptionOccurred()) {
-   //   fail("asyInvk1 failed", asyInvk1.getException());
-   // }
-   // asyInvk0.join();
-   // if (asyInvk0.exceptionOccurred()) {
-    //  fail("asyInvk0 failed", asyInvk0.getException());
-   // }
-    // printing all the indexes are created.
-    //vm0.invoke(PRQHelp.getCacheSerializableRunnableForIndexCreationCheck(name));
-    //vm1.invoke(PRQHelp.getCacheSerializableRunnableForIndexCreationCheck(name));
-    /*
-    <index name="index8">
-    <functional from-clause="/PartionedPortfolios.keys k" expression="k" />
-  </index> */
-  //  vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreateThrougXML(name, fileName));
-    
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
-//    vm0.invoke(PRQHelp
-//        .getCacheSerializableRunnableForLocalRegionCreation(localName));
-    final PortfolioData[] portfolio = PRQHelp.createPortfolioData(cnt, cntDest);
+        redundancy, PortfolioData.class));
+    final PortfolioData[] portfolio = createPortfolioData(cnt, cntDest);
     
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
         cnt, cntDest));
@@ -656,8 +574,6 @@ public class PRBasicIndexCreationDUnitTest extends
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRIndexCreate(name,
         "index7","nvl(k.status.toString(),'nopes')", "/PartionedPortfolios.values k" , ""));
     
-    //vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRIndexCreate(localName,
-    //    "index8","k", "/LocalPortfolios.keys k" , ""));
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
         cnt, cntDest));
     
@@ -677,18 +593,19 @@ public class PRBasicIndexCreationDUnitTest extends
     VM vm1 = host.getVM(1);
     VM vm2 = host.getVM(2);
     VM vm3 = host.getVM(3);
+    setCacheInVMs(vm0,vm1,vm2,vm3);
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRAccessorCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     
     // create more vms to host data.
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm2.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     
-    final PortfolioData[] portfolio = PRQHelp.createPortfolioData(cnt, cntDest);
+    final PortfolioData[] portfolio = createPortfolioData(cnt, cntDest);
     //  Putting the data into the PR's created
     vm2.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
      cnt, cntDest));
@@ -717,14 +634,15 @@ public class PRBasicIndexCreationDUnitTest extends
     VM vm1 = host.getVM(1);
     VM vm2 = host.getVM(2);
     VM vm3 = host.getVM(3);
+    setCacheInVMs(vm0,vm1,vm2,vm3);
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm2.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     
-    final PortfolioData[] portfolio = PRQHelp.createPortfolioData(cnt, cntDest);
+    final PortfolioData[] portfolio = createPortfolioData(cnt, cntDest);
     //  Putting the data into the PR's created
     vm2.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
      cnt, cntDest));
@@ -734,7 +652,7 @@ public class PRBasicIndexCreationDUnitTest extends
     
     // create an accessor vm.
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRAccessorCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     
     
   }
@@ -766,7 +684,7 @@ public class PRBasicIndexCreationDUnitTest extends
    VM vm1 = host.getVM(1);
    VM vm2 = host.getVM(2);
    VM vm3 = host.getVM(3);
-
+   setCacheInVMs(vm0,vm1,vm2,vm3);
    // Creating PR's on the participating VM's
    LogWriterUtils.getLogWriter()
      .info(
@@ -796,7 +714,7 @@ public class PRBasicIndexCreationDUnitTest extends
    // Generating portfolio object array to be populated across the PR's & Local
    // Regions
 
-   final Portfolio[] portfoliosAndPositions = PRQHelp.createPortfoliosAndPositions(totalDataSize);
+   final Portfolio[] portfoliosAndPositions = createPortfoliosAndPositions(totalDataSize);
 
    // Putting the data into the PR's created
    vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPutsKeyValue(name, portfoliosAndPositions,
@@ -864,7 +782,7 @@ public class PRBasicIndexCreationDUnitTest extends
    VM vm1 = host.getVM(1);
    VM vm2 = host.getVM(2);
    VM vm3 = host.getVM(3);
-
+   setCacheInVMs(vm0,vm1,vm2,vm3);
    // Creating PR's on the participating VM's
    LogWriterUtils.getLogWriter()
    .info(
@@ -894,7 +812,7 @@ public class PRBasicIndexCreationDUnitTest extends
    // Generating portfolio object array to be populated across the PR's & Local
    // Regions
 
-   final Portfolio[] portfoliosAndPositions = PRQHelp.createPortfoliosAndPositions(totalDataSize);
+   final Portfolio[] portfoliosAndPositions = createPortfoliosAndPositions(totalDataSize);
 
    // Putting the data into the PR's created
    vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPutsKeyValue(name, portfoliosAndPositions,
@@ -952,7 +870,7 @@ public void testIndexQueryingWithOrderByLimit() throws Exception
    VM vm1 = host.getVM(1);
    VM vm2 = host.getVM(2);
    VM vm3 = host.getVM(3);
-
+   setCacheInVMs(vm0,vm1,vm2,vm3);
    // Creating PR's on the participating VM's
    LogWriterUtils.getLogWriter()
      .info(
@@ -982,7 +900,7 @@ public void testIndexQueryingWithOrderByLimit() throws Exception
    // Generating portfolio object array to be populated across the PR's & Local
    // Regions
 
-   final Portfolio[] portfoliosAndPositions = PRQHelp.createPortfoliosAndPositions(totalDataSize);
+   final Portfolio[] portfoliosAndPositions = createPortfoliosAndPositions(totalDataSize);
 
    // Putting the data into the PR's created
    vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPutsKeyValue(name, portfoliosAndPositions,


Mime
View raw message