geode-commits mailing list archives

From kl...@apache.org
Subject [1/3] incubator-geode git commit: Remove Disabled from names of tests. Ensure each test has a Category and add Ignore to any test that is disabled due to being broken.
Date Thu, 07 Jan 2016 22:00:30 GMT
Repository: incubator-geode
Updated Branches:
  refs/heads/feature/GEODE-714 b417b275b -> 4f0776572
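
For context, the pattern this commit applies to each renamed test class is sketched
below. This is an illustration only, not a file from the diff; SomeBrokenDUnitTest
and SomeTestBase are hypothetical placeholders for the real classes that follow.

import org.junit.Ignore;
import org.junit.experimental.categories.Category;

import com.gemstone.gemfire.test.junit.categories.DistributedTest;

// Formerly SomeBrokenDUnitDisabledTest: dropping "Disabled" from the name lets the
// test runner discover the class again, @Category records what kind of test it is,
// and @Ignore keeps the still-broken test skipped until it is actually fixed.
@Category(DistributedTest.class)
@Ignore("Test was disabled by renaming to DisabledTest")
public class SomeBrokenDUnitTest extends SomeTestBase {
}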


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/4f077657/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/EvictionDUnitDisabledTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/EvictionDUnitDisabledTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/EvictionDUnitDisabledTest.java
deleted file mode 100755
index ffd5726..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/EvictionDUnitDisabledTest.java
+++ /dev/null
@@ -1,240 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.gemstone.gemfire.internal.cache;
-
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.Map;
-
-import com.gemstone.gemfire.cache.EvictionAlgorithm;
-import com.gemstone.gemfire.internal.cache.lru.HeapEvictor;
-import com.gemstone.gemfire.internal.cache.lru.MemLRUCapacityController;
-
-public class EvictionDUnitDisabledTest extends EvictionTestBase {
-  private static final long serialVersionUID = 270073077723092256L;
-
-  public EvictionDUnitDisabledTest(String name) {
-    super(name);
-  }
- 
-  public void testDummyInlineNCentralizedEviction() {
-    prepareScenario1(EvictionAlgorithm.LRU_HEAP,0);
-    putData("PR1", 50, 1);
-    
-    final int expectedEviction1 = getExpectedEvictionRatioOnVm(dataStore1);
-    final int expectedEviction2 = getExpectedEvictionRatioOnVm(dataStore2);
-    
-    raiseFakeNotification(dataStore1, "PR1", expectedEviction1);
-    raiseFakeNotification(dataStore2, "PR1", expectedEviction2);
-    validateNoOfEvictions("PR1", expectedEviction1 + expectedEviction2);
-
-    putData("PR1", 4, 1);
-    validateNoOfEvictions("PR1", 4 + expectedEviction1 + expectedEviction2);
-  }
-  
-  public void testThreadPoolSize() {
-    prepareScenario1(EvictionAlgorithm.LRU_HEAP,0);
-    putData("PR1", 50, 1);
-    raiseFakeNotification(dataStore1, "PR1", getExpectedEvictionRatioOnVm(dataStore1));
-    verifyThreadPoolTaskCount(HeapEvictor.MAX_EVICTOR_THREADS);
-  }
-  
-  public void testCentralizedEvictionnForDistributedRegionWithDummyEvent() {
-    prepareScenario1(EvictionAlgorithm.LRU_HEAP,0);
-    createDistributedRegion();
-    putDataInDistributedRegion(50, 1);
-    raiseFakeNotification(dataStore1, "DR1", getExpectedEvictionRatioOnVm(dataStore1));
-  }
-
-  /**
-   * Test Case Description: 2 VMs, 2 PRs, 4 buckets per PR. PR1 has the action
-   * Local Destroy and PR2 has the action Overflow To Disk.
-   * 
-   * The test verifies that eviction-up and eviction-down events are raised
-   * naturally and that both centralized and inline eviction take place; all of
-   * this verification is done through logs. It also verifies that if, during
-   * eviction, one node goes down and then comes back up (causing a GII to take
-   * place), the system does not throw an OOME.
-   */
-  public void testEvictionWithNodeDown() {
-    prepareScenario2(EvictionAlgorithm.LRU_HEAP, "PR3", "PR4");
-    putDataInDataStore3("PR3", 100, 1);
-    fakeNotification();
-    print("PR3");
-    killVm();
-    bringVMBackToLife();
-    assertEquals(100, getPRSize("PR3"));
-    assertEquals(0, getPRSize("PR4"));
-  }
-  
-  public void testEntryLruEvictions() {
-    int extraEntries=1;
-    createCache();
-    maxEnteries=3;
-    createPartitionedRegion(true, EvictionAlgorithm.LRU_ENTRY, "PR1", 4, 1, 1000,maxEnteries);
-    
-    final PartitionedRegion pr = (PartitionedRegion)cache.getRegion("PR1");
-    getLogWriter().info(
-        "PR- " +pr.getEvictionAttributes().getMaximum());
-    
-    for (int counter = 1; counter <= maxEnteries+extraEntries; counter++) {
-      pr.put(new Integer(counter), new byte[1 * 1024 * 1024]);
-    }
-     
-    assertEquals(extraEntries,((AbstractLRURegionMap)pr.entries)._getLruList().stats().getEvictions());
-  }
-  
-  
-  public void testEntryLru() {
-    createCache();
-    maxEnteries=12;
-    createPartitionedRegion(true, EvictionAlgorithm.LRU_ENTRY, "PR1", 4, 1, 1000,maxEnteries);
-    
-    final PartitionedRegion pr = (PartitionedRegion)cache.getRegion("PR1");
-    getLogWriter().info(
-        "PR- " +pr.getEvictionAttributes().getMaximum());
-    for (int i = 0; i < 3; i++) {
-      // assume mod-based hashing for bucket creation
-      pr.put(new Integer(i), "value0");
-      pr.put(new Integer(i
-          + pr.getPartitionAttributes().getTotalNumBuckets()), "value1");
-      pr.put(new Integer(i
-          + (pr.getPartitionAttributes().getTotalNumBuckets()) * 2),
-          "value2");
-    }
-    pr.put(new Integer(3), "value0");
-    
-    for (int i = 0; i < 2; i++) {
-      pr.put(new Integer(i
-          + pr.getPartitionAttributes().getTotalNumBuckets())*3, "value1");
-    }
-   assertEquals(0,((AbstractLRURegionMap)pr.entries)._getLruList().stats().getEvictions());
-  }
-
-  public void testCheckEntryLruEvictionsIn1DataStore() {
-    int extraEntries=10;
-    createCache();
-    maxEnteries=20;
-    createPartitionedRegion(true, EvictionAlgorithm.LRU_ENTRY, "PR1", 5, 1, 1000,maxEnteries);
-    
-    final PartitionedRegion pr = (PartitionedRegion)cache.getRegion("PR1");
-    getLogWriter().info(
-        "PR- " +pr.getEvictionAttributes().getMaximum());
-    
-    for (int counter = 1; counter <= maxEnteries+extraEntries; counter++) {
-      pr.put(new Integer(counter), new byte[1 * 1024 * 1024]);
-    }
-     
-    assertEquals(extraEntries,((AbstractLRURegionMap)pr.entries)._getLruList().stats().getEvictions());
-    
-    for (final Iterator i = pr.getDataStore().getAllLocalBuckets().iterator(); i
-        .hasNext();) {
-      final Map.Entry entry = (Map.Entry)i.next();
-      final BucketRegion bucketRegion = (BucketRegion)entry.getValue();
-      if (bucketRegion == null) {
-        continue;
-      }
-      getLogWriter().info(
-          "FINAL bucket= " + bucketRegion.getFullPath() + "size= "
-              + bucketRegion.size() + "  count= "+bucketRegion.entryCount());
-      assertEquals(4,bucketRegion.size());
-    }
-  }
-  
-  public void testCheckEntryLruEvictionsIn2DataStore() {
-    maxEnteries=20;
-    prepareScenario1(EvictionAlgorithm.LRU_ENTRY,maxEnteries);
-    putData("PR1", 60, 1);
-    validateNoOfEvictions("PR1", 20);
-  }
-  
-  
-  public void testMemLruForPRAndDR() {
-    createCache();
-    createPartitionedRegion(true, EvictionAlgorithm.LRU_MEMORY, "PR1", 4, 1, 1000,40);
-    createDistRegionWithMemEvictionAttr();
-    PartitionedRegion pr = (PartitionedRegion)cache.getRegion("PR1");
-    DistributedRegion dr = (DistributedRegion)cache.getRegion("DR1");
-    
-    assertEquals(pr.getLocalMaxMemory(), pr.getEvictionAttributes().getMaximum());
-    assertEquals(MemLRUCapacityController.DEFAULT_MAXIMUM_MEGABYTES, dr.getEvictionAttributes().getMaximum());
-   
-   for (int i = 0; i < 41; i++) {
-     pr.put(new Integer(i), new byte[1 * 1024 * 1024]);
-    }
-   
-   assertTrue(1<=((AbstractLRURegionMap)pr.entries)._getLruList().stats().getEvictions());
-   assertTrue(((AbstractLRURegionMap)pr.entries)._getLruList().stats().getEvictions()<=2);
-   
-   for (int i = 0; i < 11; i++) {
-     dr.put(new Integer(i), new byte[1 * 1024 * 1024]);
-    }
-  
-   assertTrue(1<=((AbstractLRURegionMap)dr.entries)._getLruList().stats().getEvictions());
-   assertTrue(((AbstractLRURegionMap)dr.entries)._getLruList().stats().getEvictions()<=2);
-  }
-  
-  public void testEachTaskSize() {
-    createCache();
-    createPartitionedRegion(true, EvictionAlgorithm.LRU_HEAP, "PR1", 6, 1,
-        1000, 40);
-    createPartitionedRegion(true, EvictionAlgorithm.LRU_HEAP, "PR2", 10, 1,
-        1000, 40);
-    createPartitionedRegion(true, EvictionAlgorithm.LRU_HEAP, "PR3", 15, 1,
-        1000, 40);
-    createDistRegion();
-
-    ArrayList<Integer> taskSetSizes = getTestTaskSetSizes();
-    if (taskSetSizes != null) {
-      for (Integer size : taskSetSizes) {
-        assertEquals(8, size.intValue());
-      }
-    }
-
-    /*
-    final PartitionedRegion pr1 = (PartitionedRegion)cache.getRegion("PR1");
-    final PartitionedRegion pr2 = (PartitionedRegion)cache.getRegion("PR2");
-    final PartitionedRegion pr3 = (PartitionedRegion)cache.getRegion("PR3");
-    final DistributedRegion dr1 = (DistributedRegion)cache.getRegion("DR1");
-    
-    for (int counter = 1; counter <= 18; counter++) {
-      pr1.put(new Integer(counter), new byte[1 * 1024 * 1024]);
-    }
-    getLogWriter().info("Size of PR1 before eviction = "+ pr1.size());
-    
-    for (int counter = 1; counter <= 30; counter++) {
-      pr2.put(new Integer(counter), new byte[1 * 1024 * 1024]);
-    }
-    getLogWriter().info("Size of PR2 before eviction = "+ pr2.size());
-    
-    for (int counter = 1; counter <= 45; counter++) {
-      pr3.put(new Integer(counter), new byte[1 * 1024 * 1024]);
-    }
-    getLogWriter().info("Size of PR3 before eviction = "+ pr3.size());
-    
-    for (int counter = 1; counter <= 150; counter++) {
-      dr1.put(new Integer(counter), new byte[1 * 1024 * 1024]);
-    }
-    getLogWriter().info("Size of DR1 before eviction = "+ dr1.size());
-    
-    
-    getLogWriter().info("Size of PR1 after eviction = "+ pr1.size());
-    getLogWriter().info("Size of PR2 after eviction = "+ pr2.size());
-    getLogWriter().info("Size of PR3 after eviction = "+ pr3.size());
-    getLogWriter().info("Size of PR4 after eviction = "+ dr1.size());*/
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/4f077657/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/EvictionDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/EvictionDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/EvictionDUnitTest.java
new file mode 100755
index 0000000..33807b7
--- /dev/null
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/EvictionDUnitTest.java
@@ -0,0 +1,246 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.internal.cache;
+
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.Map;
+
+import org.junit.Ignore;
+import org.junit.experimental.categories.Category;
+
+import com.gemstone.gemfire.cache.EvictionAlgorithm;
+import com.gemstone.gemfire.internal.cache.lru.HeapEvictor;
+import com.gemstone.gemfire.internal.cache.lru.MemLRUCapacityController;
+import com.gemstone.gemfire.test.junit.categories.DistributedTest;
+
+@Category(DistributedTest.class)
+@Ignore("Test was disabled by renaming to DisabledTest")
+public class EvictionDUnitTest extends EvictionTestBase {
+  private static final long serialVersionUID = 270073077723092256L;
+
+  public EvictionDUnitTest(String name) {
+    super(name);
+  }
+ 
+  public void testDummyInlineNCentralizedEviction() {
+    prepareScenario1(EvictionAlgorithm.LRU_HEAP,0);
+    putData("PR1", 50, 1);
+    
+    final int expectedEviction1 = getExpectedEvictionRatioOnVm(dataStore1);
+    final int expectedEviction2 = getExpectedEvictionRatioOnVm(dataStore2);
+    
+    raiseFakeNotification(dataStore1, "PR1", expectedEviction1);
+    raiseFakeNotification(dataStore2, "PR1", expectedEviction2);
+    validateNoOfEvictions("PR1", expectedEviction1 + expectedEviction2);
+
+    putData("PR1", 4, 1);
+    validateNoOfEvictions("PR1", 4 + expectedEviction1 + expectedEviction2);
+  }
+  
+  public void testThreadPoolSize() {
+    prepareScenario1(EvictionAlgorithm.LRU_HEAP,0);
+    putData("PR1", 50, 1);
+    raiseFakeNotification(dataStore1, "PR1", getExpectedEvictionRatioOnVm(dataStore1));
+    verifyThreadPoolTaskCount(HeapEvictor.MAX_EVICTOR_THREADS);
+  }
+  
+  public void testCentralizedEvictionnForDistributedRegionWithDummyEvent() {
+    prepareScenario1(EvictionAlgorithm.LRU_HEAP,0);
+    createDistributedRegion();
+    putDataInDistributedRegion(50, 1);
+    raiseFakeNotification(dataStore1, "DR1", getExpectedEvictionRatioOnVm(dataStore1));
+  }
+
+  /**
+   * Test Case Description: 2 VMs, 2 PRs, 4 buckets per PR. PR1 has the action
+   * Local Destroy and PR2 has the action Overflow To Disk.
+   * 
+   * The test verifies that eviction-up and eviction-down events are raised
+   * naturally and that both centralized and inline eviction take place; all of
+   * this verification is done through logs. It also verifies that if, during
+   * eviction, one node goes down and then comes back up (causing a GII to take
+   * place), the system does not throw an OOME.
+   */
+  public void testEvictionWithNodeDown() {
+    prepareScenario2(EvictionAlgorithm.LRU_HEAP, "PR3", "PR4");
+    putDataInDataStore3("PR3", 100, 1);
+    fakeNotification();
+    print("PR3");
+    killVm();
+    bringVMBackToLife();
+    assertEquals(100, getPRSize("PR3"));
+    assertEquals(0, getPRSize("PR4"));
+  }
+  
+  public void testEntryLruEvictions() {
+    int extraEntries=1;
+    createCache();
+    maxEnteries=3;
+    createPartitionedRegion(true, EvictionAlgorithm.LRU_ENTRY, "PR1", 4, 1, 1000,maxEnteries);
+    
+    final PartitionedRegion pr = (PartitionedRegion)cache.getRegion("PR1");
+    getLogWriter().info(
+        "PR- " +pr.getEvictionAttributes().getMaximum());
+    
+    for (int counter = 1; counter <= maxEnteries+extraEntries; counter++) {
+      pr.put(new Integer(counter), new byte[1 * 1024 * 1024]);
+    }
+     
+    assertEquals(extraEntries,((AbstractLRURegionMap)pr.entries)._getLruList().stats().getEvictions());
+  }
+  
+  
+  public void testEntryLru() {
+    createCache();
+    maxEnteries=12;
+    createPartitionedRegion(true, EvictionAlgorithm.LRU_ENTRY, "PR1", 4, 1, 1000,maxEnteries);
+    
+    final PartitionedRegion pr = (PartitionedRegion)cache.getRegion("PR1");
+    getLogWriter().info(
+        "PR- " +pr.getEvictionAttributes().getMaximum());
+    for (int i = 0; i < 3; i++) {
+      // assume mod-based hashing for bucket creation
+      pr.put(new Integer(i), "value0");
+      pr.put(new Integer(i
+          + pr.getPartitionAttributes().getTotalNumBuckets()), "value1");
+      pr.put(new Integer(i
+          + (pr.getPartitionAttributes().getTotalNumBuckets()) * 2),
+          "value2");
+    }
+    pr.put(new Integer(3), "value0");
+    
+    for (int i = 0; i < 2; i++) {
+      pr.put(new Integer(i
+          + pr.getPartitionAttributes().getTotalNumBuckets())*3, "value1");
+    }
+   assertEquals(0,((AbstractLRURegionMap)pr.entries)._getLruList().stats().getEvictions());
+  }
+
+  public void testCheckEntryLruEvictionsIn1DataStore() {
+    int extraEntries=10;
+    createCache();
+    maxEnteries=20;
+    createPartitionedRegion(true, EvictionAlgorithm.LRU_ENTRY, "PR1", 5, 1, 1000,maxEnteries);
+    
+    final PartitionedRegion pr = (PartitionedRegion)cache.getRegion("PR1");
+    getLogWriter().info(
+        "PR- " +pr.getEvictionAttributes().getMaximum());
+    
+    for (int counter = 1; counter <= maxEnteries+extraEntries; counter++) {
+      pr.put(new Integer(counter), new byte[1 * 1024 * 1024]);
+    }
+     
+    assertEquals(extraEntries,((AbstractLRURegionMap)pr.entries)._getLruList().stats().getEvictions());
+    
+    for (final Iterator i = pr.getDataStore().getAllLocalBuckets().iterator(); i
+        .hasNext();) {
+      final Map.Entry entry = (Map.Entry)i.next();
+      final BucketRegion bucketRegion = (BucketRegion)entry.getValue();
+      if (bucketRegion == null) {
+        continue;
+      }
+      getLogWriter().info(
+          "FINAL bucket= " + bucketRegion.getFullPath() + "size= "
+              + bucketRegion.size() + "  count= "+bucketRegion.entryCount());
+      assertEquals(4,bucketRegion.size());
+    }
+  }
+  
+  public void testCheckEntryLruEvictionsIn2DataStore() {
+    maxEnteries=20;
+    prepareScenario1(EvictionAlgorithm.LRU_ENTRY,maxEnteries);
+    putData("PR1", 60, 1);
+    validateNoOfEvictions("PR1", 20);
+  }
+  
+  
+  public void testMemLruForPRAndDR() {
+    createCache();
+    createPartitionedRegion(true, EvictionAlgorithm.LRU_MEMORY, "PR1", 4, 1, 1000,40);
+    createDistRegionWithMemEvictionAttr();
+    PartitionedRegion pr = (PartitionedRegion)cache.getRegion("PR1");
+    DistributedRegion dr = (DistributedRegion)cache.getRegion("DR1");
+    
+    assertEquals(pr.getLocalMaxMemory(), pr.getEvictionAttributes().getMaximum());
+    assertEquals(MemLRUCapacityController.DEFAULT_MAXIMUM_MEGABYTES, dr.getEvictionAttributes().getMaximum());
+   
+   for (int i = 0; i < 41; i++) {
+     pr.put(new Integer(i), new byte[1 * 1024 * 1024]);
+    }
+   
+   assertTrue(1<=((AbstractLRURegionMap)pr.entries)._getLruList().stats().getEvictions());
+   assertTrue(((AbstractLRURegionMap)pr.entries)._getLruList().stats().getEvictions()<=2);
+   
+   for (int i = 0; i < 11; i++) {
+     dr.put(new Integer(i), new byte[1 * 1024 * 1024]);
+    }
+  
+   assertTrue(1<=((AbstractLRURegionMap)dr.entries)._getLruList().stats().getEvictions());
+   assertTrue(((AbstractLRURegionMap)dr.entries)._getLruList().stats().getEvictions()<=2);
+  }
+  
+  public void testEachTaskSize() {
+    createCache();
+    createPartitionedRegion(true, EvictionAlgorithm.LRU_HEAP, "PR1", 6, 1,
+        1000, 40);
+    createPartitionedRegion(true, EvictionAlgorithm.LRU_HEAP, "PR2", 10, 1,
+        1000, 40);
+    createPartitionedRegion(true, EvictionAlgorithm.LRU_HEAP, "PR3", 15, 1,
+        1000, 40);
+    createDistRegion();
+
+    ArrayList<Integer> taskSetSizes = getTestTaskSetSizes();
+    if (taskSetSizes != null) {
+      for (Integer size : taskSetSizes) {
+        assertEquals(8, size.intValue());
+      }
+    }
+
+    /*
+    final PartitionedRegion pr1 = (PartitionedRegion)cache.getRegion("PR1");
+    final PartitionedRegion pr2 = (PartitionedRegion)cache.getRegion("PR2");
+    final PartitionedRegion pr3 = (PartitionedRegion)cache.getRegion("PR3");
+    final DistributedRegion dr1 = (DistributedRegion)cache.getRegion("DR1");
+    
+    for (int counter = 1; counter <= 18; counter++) {
+      pr1.put(new Integer(counter), new byte[1 * 1024 * 1024]);
+    }
+    getLogWriter().info("Size of PR1 before eviction = "+ pr1.size());
+    
+    for (int counter = 1; counter <= 30; counter++) {
+      pr2.put(new Integer(counter), new byte[1 * 1024 * 1024]);
+    }
+    getLogWriter().info("Size of PR2 before eviction = "+ pr2.size());
+    
+    for (int counter = 1; counter <= 45; counter++) {
+      pr3.put(new Integer(counter), new byte[1 * 1024 * 1024]);
+    }
+    getLogWriter().info("Size of PR3 before eviction = "+ pr3.size());
+    
+    for (int counter = 1; counter <= 150; counter++) {
+      dr1.put(new Integer(counter), new byte[1 * 1024 * 1024]);
+    }
+    getLogWriter().info("Size of DR1 before eviction = "+ dr1.size());
+    
+    
+    getLogWriter().info("Size of PR1 after eviction = "+ pr1.size());
+    getLogWriter().info("Size of PR2 after eviction = "+ pr2.size());
+    getLogWriter().info("Size of PR3 after eviction = "+ pr3.size());
+    getLogWriter().info("Size of PR4 after eviction = "+ dr1.size());*/
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/4f077657/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/OffHeapEvictionDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/OffHeapEvictionDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/OffHeapEvictionDUnitTest.java
index 386f8ce..060aea7 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/OffHeapEvictionDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/OffHeapEvictionDUnitTest.java
@@ -34,7 +34,7 @@ import dunit.VM;
  * Performs eviction dunit tests for off-heap memory.
  * @author rholmes
  */
-public class OffHeapEvictionDUnitTest extends EvictionDUnitDisabledTest {
+public class OffHeapEvictionDUnitTest extends EvictionDUnitTest {
   public OffHeapEvictionDUnitTest(String name) {
     super(name);
   }  

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/4f077657/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/StatsBugDUnitDisabledTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/StatsBugDUnitDisabledTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/StatsBugDUnitDisabledTest.java
deleted file mode 100644
index 6b957e8..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/StatsBugDUnitDisabledTest.java
+++ /dev/null
@@ -1,368 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.gemstone.gemfire.internal.cache.ha;
-
-import java.util.Iterator;
-import java.util.Properties;
-
-import com.gemstone.gemfire.cache.AttributesFactory;
-import com.gemstone.gemfire.cache.Cache;
-import com.gemstone.gemfire.cache.CacheFactory;
-import com.gemstone.gemfire.cache.DataPolicy;
-import com.gemstone.gemfire.cache.Region;
-import com.gemstone.gemfire.cache.RegionAttributes;
-import com.gemstone.gemfire.cache.Scope;
-import com.gemstone.gemfire.cache.client.internal.PoolImpl;
-import com.gemstone.gemfire.cache.server.CacheServer;
-import com.gemstone.gemfire.cache30.ClientServerTestCase;
-import com.gemstone.gemfire.distributed.DistributedSystem;
-import com.gemstone.gemfire.internal.AvailablePort;
-
-import dunit.DistributedTestCase;
-import dunit.Host;
-import dunit.VM;
-
-/**
- * This is a DUnit test for bug 36109. The test has a cache client with a primary
- * and a secondary cache server as its endpoints. The primary does some operations
- * and is then stopped; the client fails over to the secondary, some operations
- * are done from the secondary, and the test verifies that the 'invalidates' stat
- * at the client matches the total number of operations done by both primary and
- * secondary. The bug appeared because the invalidate stat was part of the
- * Endpoint, which got closed along with the failed endpoint during failover. It
- * has been fixed by moving the invalidate stat into our own implementation.
- * 
- * @author Dinesh Patel
- * 
- */
-public class StatsBugDUnitDisabledTest extends DistributedTestCase
-{
-  /** primary cache server */
-  VM primary = null;
-
-  /** secondary cache server */
-  VM secondary = null;
-
-  /** the cache client */
-  VM client1 = null;
-
-  /** the cache */
-  private static Cache cache = null;
-
-  /** port for the primary cache server */
-  private static int PORT1;
-
-  /** port for the secondary cache server */
-  private static int PORT2;
-
-  /** name of the test region */
-  private static final String REGION_NAME = "StatsBugDUnitTest_Region";
-
-  /** bridge-writer instance (used to get the connection proxy handle) */
-  private static PoolImpl pool = null;
-
-  /** total number of cache servers */
-  private static final int TOTAL_SERVERS = 2;
-
-  /** number of puts done by each server */
-  private static final int PUTS_PER_SERVER = 10;
-
-  /** prefix added to the keys of events generated on primary */
-  private static final String primaryPrefix = "primary_";
-
-  /** prefix added to the keys of events generated on secondary */
-  private static final String secondaryPrefix = "secondary_";
-
-  /**
-   * Constructor
-   * 
-   * @param name -
-   *          name for this test instance
-   */
-  public StatsBugDUnitDisabledTest(String name) {
-    super(name);
-  }
-
-  /**
-   * Creates the primary and the secondary cache servers
-   * 
-   * @throws Exception -
-   *           thrown if any problem occurs in initializing the test
-   */
-  public void setUp() throws Exception
-  {
-    disconnectAllFromDS();
-    super.setUp();
-    final Host host = Host.getHost(0);
-    primary = host.getVM(0);
-    secondary = host.getVM(1);
-    client1 = host.getVM(2);
-    PORT1 = ((Integer)primary.invoke(StatsBugDUnitDisabledTest.class,
-        "createServerCache")).intValue();
-    PORT2 = ((Integer)secondary.invoke(StatsBugDUnitDisabledTest.class,
-        "createServerCache")).intValue();
-  }
-
-  /**
-   * Create the cache
-   * 
-   * @param props -
-   *          properties for DS
-   * @return the cache instance
-   * @throws Exception -
-   *           thrown if any problem occurs in cache creation
-   */
-  private Cache createCache(Properties props) throws Exception
-  {
-    DistributedSystem ds = getSystem(props);
-    ds.disconnect();
-    ds = getSystem(props);
-    Cache cache = null;
-    cache = CacheFactory.create(ds);
-    if (cache == null) {
-      throw new Exception("CacheFactory.create() returned null ");
-    }
-    return cache;
-  }
-
-  /**
-   * close the cache instances in server and client during tearDown
-   * 
-   * @throws Exception
-   *           thrown if any problem occurs in closing cache
-   */
-  public void tearDown2() throws Exception
-  {
-    super.tearDown2();
-    // close client
-    client1.invoke(StatsBugDUnitDisabledTest.class, "closeCache");
-
-    // close server
-    primary.invoke(StatsBugDUnitDisabledTest.class, "closeCache");
-    secondary.invoke(StatsBugDUnitDisabledTest.class, "closeCache");
-  }
-
-  /**
-   * This test does the following:<br>
-   * 1)Create and populate the client<br>
-   * 2)Do some operations from the primary cache-server<br>
-   * 3)Stop the primary cache-server<br>
-   * 4)Wait some time to allow client to failover to secondary and do some
-   * operations from secondary<br>
-   * 5)Verify that the invalidates stats at the client accounts for the
-   * operations done by both, primary and secondary.
-   * 
-   * @throws Exception -
-   *           thrown if any problem occurs in test execution
-   */
-  public void testBug36109() throws Exception
-  {
-    getLogWriter().info("testBug36109 : BEGIN");
-    client1.invoke(StatsBugDUnitDisabledTest.class, "createClientCacheForInvalidates", new Object[] {
-        getServerHostName(Host.getHost(0)), new Integer(PORT1), new Integer(PORT2) });
-    client1.invoke(StatsBugDUnitDisabledTest.class, "prepopulateClient");
-    primary.invoke(StatsBugDUnitDisabledTest.class, "doEntryOperations",
-        new Object[] { primaryPrefix });
-    pause(3000);
-    primary.invoke(StatsBugDUnitDisabledTest.class, "stopServer");
-    try {
-      Thread.sleep(5000);
-    }
-    catch (InterruptedException ignore) {
-      fail("interrupted");
-    }
-
-    secondary.invoke(StatsBugDUnitDisabledTest.class, "doEntryOperations",
-        new Object[] { secondaryPrefix });
-    try {
-      Thread.sleep(5000);
-    }
-    catch (InterruptedException ignore) {
-      fail("interrupted");
-    }
-
-    client1.invoke(StatsBugDUnitDisabledTest.class, "verifyNumInvalidates");
-    getLogWriter().info("testBug36109 : END");
-  }
-
-  /**
-   * Creates and starts the cache-server
-   * 
-   * @return - the port on which cache-server is running
-   * @throws Exception -
-   *           thrown if any problem occurs in cache/server creation
-   */
-  public static Integer createServerCache() throws Exception
-  {
-    StatsBugDUnitDisabledTest test = new StatsBugDUnitDisabledTest("temp");
-    Properties props = new Properties();
-    cache = test.createCache(props);
-    AttributesFactory factory = new AttributesFactory();
-    factory.setScope(Scope.DISTRIBUTED_ACK);
-    factory.setDataPolicy(DataPolicy.REPLICATE);
-
-    RegionAttributes attrs = factory.create();
-
-    cache.createRegion(REGION_NAME, attrs);
-    CacheServer server = cache.addCacheServer();
-    int port = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
-    server.setPort(port);
-    server.setNotifyBySubscription(false);
-    server.setSocketBufferSize(32768);
-    server.start();
-    getLogWriter().info("Server started at PORT = " + port);
-    return new Integer(port);
-  }
-
-  /**
-   * Initializes the cache client
-   * 
-   * @param port1 -
-   *          port for the primary cache-server
-   * @param port2 -
-   *          port for the secondary cache-server
-   * @throws Exception -
-   *           thrown if any problem occurs in initializing the client
-   */
-  public static void createClientCache(String host, Integer port1, Integer port2)
-      throws Exception
-  {
-    StatsBugDUnitDisabledTest test = new StatsBugDUnitDisabledTest("temp");
-    cache = test.createCache(createProperties1());
-    AttributesFactory factory = new AttributesFactory();
-    factory.setScope(Scope.DISTRIBUTED_ACK);
-    pool = (PoolImpl)ClientServerTestCase.configureConnectionPool(factory, host, new int[] {port1.intValue(),port2.intValue()}, true, -1, 3, null);
-    RegionAttributes attrs = factory.create();
-    Region region = cache.createRegion(REGION_NAME, attrs);
-    region.registerInterest("ALL_KEYS");
-    getLogWriter().info("Client cache created");
-  }
-
-  /**
-   * Initializes the cache client
-   * 
-   * @param port1 -
-   *          port for the primary cache-server
-   * @param port2 -
-   *          port for the secondary cache-server
-   * @throws Exception -
-   *           thrown if any problem occurs in initializing the client
-   */
-  public static void createClientCacheForInvalidates(String host, Integer port1, Integer port2)
-      throws Exception
-  {
-    StatsBugDUnitDisabledTest test = new StatsBugDUnitDisabledTest("temp");
-    cache = test.createCache(createProperties1());
-    AttributesFactory factory = new AttributesFactory();
-    factory.setScope(Scope.DISTRIBUTED_ACK);
-    pool = (PoolImpl)ClientServerTestCase.configureConnectionPool(factory, host, new int[] {port1.intValue(),port2.intValue()}, true, -1, 3, null);
-    RegionAttributes attrs = factory.create();
-    Region region = cache.createRegion(REGION_NAME, attrs);
-    region.registerInterest("ALL_KEYS", false, false);
-    getLogWriter().info("Client cache created");
-  }
-  
-  /**
-   * Verify that the invalidates stats at the client accounts for the operations
-   * done by both, primary and secondary.
-   * 
-   */
-  public static void verifyNumInvalidates()
-  {
-    long invalidatesRecordedByStats = pool.getInvalidateCount();
-    getLogWriter().info(
-        "invalidatesRecordedByStats = " + invalidatesRecordedByStats);
-
-    int expectedInvalidates = TOTAL_SERVERS * PUTS_PER_SERVER;
-    getLogWriter().info("expectedInvalidates = " + expectedInvalidates);
-
-    if (invalidatesRecordedByStats != expectedInvalidates) {
-      fail("Invalidates received by client(" + invalidatesRecordedByStats
-          + ") does not match with the number of operations("
-          + expectedInvalidates + ") done at server");
-    }
-  }
-
-  /**
-   * Stops the cache server
-   * 
-   */
-  public static void stopServer()
-  {
-    try {
-      Iterator iter = cache.getCacheServers().iterator();
-      if (iter.hasNext()) {
-        CacheServer server = (CacheServer)iter.next();
-        server.stop();
-      }
-    }
-    catch (Exception e) {
-      fail("failed while stopServer()" + e);
-    }
-  }
-
-  /**
-   * create properties for a loner VM
-   */
-  private static Properties createProperties1()
-  {
-    Properties props = new Properties();
-    props.setProperty("mcast-port", "0");
-    props.setProperty("locators", "");
-    return props;
-  }
-
-
-  /**
-   * Do PUT operations
-   * 
-   * @param keyPrefix -
-   *          string prefix for the keys of all the entries to be put
-   * @throws Exception -
-   *           thrown if any exception occurs in doing PUTs
-   */
-  public static void doEntryOperations(String keyPrefix) throws Exception
-  {
-    Region r1 = cache.getRegion(Region.SEPARATOR + REGION_NAME);
-    for (int i = 0; i < PUTS_PER_SERVER; i++) {
-      r1.put(keyPrefix + i, keyPrefix + "val-" + i);
-    }
-  }
-
-  /**
-   * Prepopulate the client with the entries that the cache servers will put
-   * 
-   * @throws Exception
-   */
-  public static void prepopulateClient() throws Exception
-  {
-    doEntryOperations(primaryPrefix);
-    doEntryOperations(secondaryPrefix);
-  }
-
-  /**
-   * Close the cache
-   * 
-   */
-  public static void closeCache()
-  {
-    if (cache != null && !cache.isClosed()) {
-      cache.close();
-      cache.getDistributedSystem().disconnect();
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/4f077657/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/StatsBugDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/StatsBugDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/StatsBugDUnitTest.java
new file mode 100644
index 0000000..3d09f80
--- /dev/null
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/StatsBugDUnitTest.java
@@ -0,0 +1,374 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.internal.cache.ha;
+
+import java.util.Iterator;
+import java.util.Properties;
+
+import org.junit.Ignore;
+import org.junit.experimental.categories.Category;
+
+import com.gemstone.gemfire.cache.AttributesFactory;
+import com.gemstone.gemfire.cache.Cache;
+import com.gemstone.gemfire.cache.CacheFactory;
+import com.gemstone.gemfire.cache.DataPolicy;
+import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.cache.RegionAttributes;
+import com.gemstone.gemfire.cache.Scope;
+import com.gemstone.gemfire.cache.client.internal.PoolImpl;
+import com.gemstone.gemfire.cache.server.CacheServer;
+import com.gemstone.gemfire.cache30.ClientServerTestCase;
+import com.gemstone.gemfire.distributed.DistributedSystem;
+import com.gemstone.gemfire.internal.AvailablePort;
+import com.gemstone.gemfire.test.junit.categories.DistributedTest;
+
+import dunit.DistributedTestCase;
+import dunit.Host;
+import dunit.VM;
+
+/**
+ * This is a DUnit test for bug 36109. The test has a cache client with a primary
+ * and a secondary cache server as its endpoints. The primary does some operations
+ * and is then stopped; the client fails over to the secondary, some operations
+ * are done from the secondary, and the test verifies that the 'invalidates' stat
+ * at the client matches the total number of operations done by both primary and
+ * secondary. The bug appeared because the invalidate stat was part of the
+ * Endpoint, which got closed along with the failed endpoint during failover. It
+ * has been fixed by moving the invalidate stat into our own implementation.
+ * 
+ * @author Dinesh Patel
+ * 
+ */
+@Category(DistributedTest.class)
+@Ignore("Test was disabled by renaming to DisabledTest")
+public class StatsBugDUnitTest extends DistributedTestCase
+{
+  /** primary cache server */
+  VM primary = null;
+
+  /** secondary cache server */
+  VM secondary = null;
+
+  /** the cache client */
+  VM client1 = null;
+
+  /** the cache */
+  private static Cache cache = null;
+
+  /** port for the primary cache server */
+  private static int PORT1;
+
+  /** port for the secondary cache server */
+  private static int PORT2;
+
+  /** name of the test region */
+  private static final String REGION_NAME = "StatsBugDUnitTest_Region";
+
+  /** bridge-writer instance (used to get the connection proxy handle) */
+  private static PoolImpl pool = null;
+
+  /** total number of cache servers */
+  private static final int TOTAL_SERVERS = 2;
+
+  /** number of puts done by each server */
+  private static final int PUTS_PER_SERVER = 10;
+
+  /** prefix added to the keys of events generated on primary */
+  private static final String primaryPrefix = "primary_";
+
+  /** prefix added to the keys of events generated on secondary */
+  private static final String secondaryPrefix = "secondary_";
+
+  /**
+   * Constructor
+   * 
+   * @param name -
+   *          name for this test instance
+   */
+  public StatsBugDUnitTest(String name) {
+    super(name);
+  }
+
+  /**
+   * Creates the primary and the secondary cache servers
+   * 
+   * @throws Exception -
+   *           thrown if any problem occurs in initializing the test
+   */
+  public void setUp() throws Exception
+  {
+    disconnectAllFromDS();
+    super.setUp();
+    final Host host = Host.getHost(0);
+    primary = host.getVM(0);
+    secondary = host.getVM(1);
+    client1 = host.getVM(2);
+    PORT1 = ((Integer)primary.invoke(StatsBugDUnitTest.class,
+        "createServerCache")).intValue();
+    PORT2 = ((Integer)secondary.invoke(StatsBugDUnitTest.class,
+        "createServerCache")).intValue();
+  }
+
+  /**
+   * Create the cache
+   * 
+   * @param props -
+   *          properties for DS
+   * @return the cache instance
+   * @throws Exception -
+   *           thrown if any problem occurs in cache creation
+   */
+  private Cache createCache(Properties props) throws Exception
+  {
+    DistributedSystem ds = getSystem(props);
+    ds.disconnect();
+    ds = getSystem(props);
+    Cache cache = null;
+    cache = CacheFactory.create(ds);
+    if (cache == null) {
+      throw new Exception("CacheFactory.create() returned null ");
+    }
+    return cache;
+  }
+
+  /**
+   * close the cache instances in server and client during tearDown
+   * 
+   * @throws Exception
+   *           thrown if any problem occurs in closing cache
+   */
+  public void tearDown2() throws Exception
+  {
+    super.tearDown2();
+    // close client
+    client1.invoke(StatsBugDUnitTest.class, "closeCache");
+
+    // close server
+    primary.invoke(StatsBugDUnitTest.class, "closeCache");
+    secondary.invoke(StatsBugDUnitTest.class, "closeCache");
+  }
+
+  /**
+   * This test does the following:<br>
+   * 1)Create and populate the client<br>
+   * 2)Do some operations from the primary cache-server<br>
+   * 3)Stop the primary cache-server<br>
+   * 4)Wait some time to allow client to failover to secondary and do some
+   * operations from secondary<br>
+   * 5)Verify that the invalidates stats at the client accounts for the
+   * operations done by both, primary and secondary.
+   * 
+   * @throws Exception -
+   *           thrown if any problem occurs in test execution
+   */
+  public void testBug36109() throws Exception
+  {
+    getLogWriter().info("testBug36109 : BEGIN");
+    client1.invoke(StatsBugDUnitTest.class, "createClientCacheForInvalidates", new Object[] {
+        getServerHostName(Host.getHost(0)), new Integer(PORT1), new Integer(PORT2) });
+    client1.invoke(StatsBugDUnitTest.class, "prepopulateClient");
+    primary.invoke(StatsBugDUnitTest.class, "doEntryOperations",
+        new Object[] { primaryPrefix });
+    pause(3000);
+    primary.invoke(StatsBugDUnitTest.class, "stopServer");
+    try {
+      Thread.sleep(5000);
+    }
+    catch (InterruptedException ignore) {
+      fail("interrupted");
+    }
+
+    secondary.invoke(StatsBugDUnitTest.class, "doEntryOperations",
+        new Object[] { secondaryPrefix });
+    try {
+      Thread.sleep(5000);
+    }
+    catch (InterruptedException ignore) {
+      fail("interrupted");
+    }
+
+    client1.invoke(StatsBugDUnitTest.class, "verifyNumInvalidates");
+    getLogWriter().info("testBug36109 : END");
+  }
+
+  /**
+   * Creates and starts the cache-server
+   * 
+   * @return - the port on which cache-server is running
+   * @throws Exception -
+   *           thrown if any problem occurs in cache/server creation
+   */
+  public static Integer createServerCache() throws Exception
+  {
+    StatsBugDUnitTest test = new StatsBugDUnitTest("temp");
+    Properties props = new Properties();
+    cache = test.createCache(props);
+    AttributesFactory factory = new AttributesFactory();
+    factory.setScope(Scope.DISTRIBUTED_ACK);
+    factory.setDataPolicy(DataPolicy.REPLICATE);
+
+    RegionAttributes attrs = factory.create();
+
+    cache.createRegion(REGION_NAME, attrs);
+    CacheServer server = cache.addCacheServer();
+    int port = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
+    server.setPort(port);
+    server.setNotifyBySubscription(false);
+    server.setSocketBufferSize(32768);
+    server.start();
+    getLogWriter().info("Server started at PORT = " + port);
+    return new Integer(port);
+  }
+
+  /**
+   * Initializes the cache client
+   * 
+   * @param port1 -
+   *          port for the primary cache-server
+   * @param port2 -
+   *          port for the secondary cache-server
+   * @throws Exception -
+   *           thrown if any problem occurs in initializing the client
+   */
+  public static void createClientCache(String host, Integer port1, Integer port2)
+      throws Exception
+  {
+    StatsBugDUnitTest test = new StatsBugDUnitTest("temp");
+    cache = test.createCache(createProperties1());
+    AttributesFactory factory = new AttributesFactory();
+    factory.setScope(Scope.DISTRIBUTED_ACK);
+    pool = (PoolImpl)ClientServerTestCase.configureConnectionPool(factory, host, new int[] {port1.intValue(),port2.intValue()}, true, -1, 3, null);
+    RegionAttributes attrs = factory.create();
+    Region region = cache.createRegion(REGION_NAME, attrs);
+    region.registerInterest("ALL_KEYS");
+    getLogWriter().info("Client cache created");
+  }
+
+  /**
+   * Initializes the cache client
+   * 
+   * @param port1 -
+   *          port for the primary cache-server
+   * @param port2 -
+   *          port for the secondary cache-server
+   * @throws Exception -
+   *           thrown if any problem occurs in initializing the client
+   */
+  public static void createClientCacheForInvalidates(String host, Integer port1, Integer port2)
+      throws Exception
+  {
+    StatsBugDUnitTest test = new StatsBugDUnitTest("temp");
+    cache = test.createCache(createProperties1());
+    AttributesFactory factory = new AttributesFactory();
+    factory.setScope(Scope.DISTRIBUTED_ACK);
+    pool = (PoolImpl)ClientServerTestCase.configureConnectionPool(factory, host, new int[] {port1.intValue(),port2.intValue()}, true, -1, 3, null);
+    RegionAttributes attrs = factory.create();
+    Region region = cache.createRegion(REGION_NAME, attrs);
+    region.registerInterest("ALL_KEYS", false, false);
+    getLogWriter().info("Client cache created");
+  }
+  
+  /**
+   * Verify that the invalidates stats at the client accounts for the operations
+   * done by both, primary and secondary.
+   * 
+   */
+  public static void verifyNumInvalidates()
+  {
+    long invalidatesRecordedByStats = pool.getInvalidateCount();
+    getLogWriter().info(
+        "invalidatesRecordedByStats = " + invalidatesRecordedByStats);
+
+    int expectedInvalidates = TOTAL_SERVERS * PUTS_PER_SERVER;
+    getLogWriter().info("expectedInvalidates = " + expectedInvalidates);
+
+    if (invalidatesRecordedByStats != expectedInvalidates) {
+      fail("Invalidates received by client(" + invalidatesRecordedByStats
+          + ") does not match with the number of operations("
+          + expectedInvalidates + ") done at server");
+    }
+  }
+
+  /**
+   * Stops the cache server
+   * 
+   */
+  public static void stopServer()
+  {
+    try {
+      Iterator iter = cache.getCacheServers().iterator();
+      if (iter.hasNext()) {
+        CacheServer server = (CacheServer)iter.next();
+        server.stop();
+      }
+    }
+    catch (Exception e) {
+      fail("failed while stopServer()" + e);
+    }
+  }
+
+  /**
+   * create properties for a loner VM
+   */
+  private static Properties createProperties1()
+  {
+    Properties props = new Properties();
+    props.setProperty("mcast-port", "0");
+    props.setProperty("locators", "");
+    return props;
+  }
+
+
+  /**
+   * Do PUT operations
+   * 
+   * @param keyPrefix -
+   *          string prefix for the keys of all the entries to be put
+   * @throws Exception -
+   *           thrown if any exception occurs in doing PUTs
+   */
+  public static void doEntryOperations(String keyPrefix) throws Exception
+  {
+    Region r1 = cache.getRegion(Region.SEPARATOR + REGION_NAME);
+    for (int i = 0; i < PUTS_PER_SERVER; i++) {
+      r1.put(keyPrefix + i, keyPrefix + "val-" + i);
+    }
+  }
+
+  /**
+   * Prepopulate the client with the entries that the cache servers will put
+   * 
+   * @throws Exception
+   */
+  public static void prepopulateClient() throws Exception
+  {
+    doEntryOperations(primaryPrefix);
+    doEntryOperations(secondaryPrefix);
+  }
+
+  /**
+   * Close the cache
+   * 
+   */
+  public static void closeCache()
+  {
+    if (cache != null && !cache.isClosed()) {
+      cache.close();
+      cache.getDistributedSystem().disconnect();
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/4f077657/gemfire-junit/src/test/java/com/gemstone/gemfire/test/junit/categories/ContainerTest.java
----------------------------------------------------------------------
diff --git a/gemfire-junit/src/test/java/com/gemstone/gemfire/test/junit/categories/ContainerTest.java b/gemfire-junit/src/test/java/com/gemstone/gemfire/test/junit/categories/ContainerTest.java
new file mode 100755
index 0000000..8eec738
--- /dev/null
+++ b/gemfire-junit/src/test/java/com/gemstone/gemfire/test/junit/categories/ContainerTest.java
@@ -0,0 +1,25 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.test.junit.categories;
+/**
+ * JUnit Test Category that specifies that a test executes within a container
+ * environment, such as an OSGi server.
+ *  
+ * @author Kirk Lund
+ */
+public interface ContainerTest {
+}
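
A usage note (a hedged sketch, not part of this commit): marker interfaces such as
ContainerTest are consumed by JUnit's Categories suite runner, which filters tests
by their @Category annotation. ContainerTestSuite and the suite's class list here
are hypothetical.

import org.junit.experimental.categories.Categories;
import org.junit.experimental.categories.Categories.IncludeCategory;
import org.junit.runner.RunWith;
import org.junit.runners.Suite.SuiteClasses;

import com.gemstone.gemfire.test.junit.categories.ContainerTest;

// Runs only those tests in the listed classes that are annotated with
// @Category(ContainerTest.class); all other tests are filtered out.
@RunWith(Categories.class)
@IncludeCategory(ContainerTest.class)
@SuiteClasses({ SomeContainerDependentTest.class }) // hypothetical test class
public class ContainerTestSuite {
}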

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/4f077657/gemfire-junit/src/test/java/com/gemstone/gemfire/test/junit/categories/HydraTest.java
----------------------------------------------------------------------
diff --git a/gemfire-junit/src/test/java/com/gemstone/gemfire/test/junit/categories/HydraTest.java b/gemfire-junit/src/test/java/com/gemstone/gemfire/test/junit/categories/HydraTest.java
new file mode 100755
index 0000000..4fe535b
--- /dev/null
+++ b/gemfire-junit/src/test/java/com/gemstone/gemfire/test/junit/categories/HydraTest.java
@@ -0,0 +1,24 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.test.junit.categories;
+/**
+ * JUnit Test Category that specifies a hydra test.
+ *  
+ * @author Kirk Lund
+ */
+public interface HydraTest {
+}
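
HydraTest is used the same way. Note that @Category can also be applied to an
individual test method rather than a whole class; a minimal hypothetical example:

import org.junit.Test;
import org.junit.experimental.categories.Category;

import com.gemstone.gemfire.test.junit.categories.HydraTest;

public class ExampleHydraDependentTest { // hypothetical
  @Category(HydraTest.class) // category on a single method, not the class
  @Test
  public void runsOnlyWhenHydraTestsAreIncluded() {
  }
}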

