geode-commits mailing list archives

From kl...@apache.org
Subject [3/7] incubator-geode git commit: Change AssertionFailedError to AssertionError and general cleanup.
Date Thu, 02 Jun 2016 05:16:23 GMT
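
Per the subject line and the hunks below, this commit makes two recurring changes: test code that wrapped checked exceptions in junit.framework.AssertionFailedError (or in anonymous CacheException subclasses) now rethrows them as java.lang.AssertionError, and per-test @Before/@After methods are folded into lifecycle hooks on the shared test base classes. A minimal sketch of the assertion change, using a hypothetical helper rather than any class touched by this commit:

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Paths;

    // Hypothetical helper; only the catch block mirrors the pattern applied by this commit.
    public class AssertionErrorSketch {

      // Before: checked exceptions were wrapped in junit.framework.AssertionFailedError,
      // tying helper code to the JUnit 3 API:
      //
      //   catch (IOException e) {
      //     throw new AssertionFailedError(e.getMessage());
      //   }

      // After: rethrow as the standard java.lang.AssertionError. Passing the exception
      // to the constructor records it as the cause, so the original stack trace survives.
      static byte[] readOrFail(String path) {
        try {
          return Files.readAllBytes(Paths.get(path));
        } catch (IOException e) {
          throw new AssertionError(e);
        }
      }
    }

The same replacement appears verbatim near the end of this message in HARegionQueueDUnitTest, where anonymous "throw new CacheException(e) {}" blocks become "throw new AssertionError(e)".
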
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bb91dedc/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/FaultingInJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/FaultingInJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/FaultingInJUnitTest.java
index 76d8bc9..6a0e660 100755
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/FaultingInJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/FaultingInJUnitTest.java
@@ -16,32 +16,27 @@
  */
 package com.gemstone.gemfire.internal.cache;
 
+import static org.junit.Assert.*;
+
 import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import static org.junit.Assert.*;
-
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 
 /**
  * Tests faulting in from current oplog, old oplog
  * and htree for different modes (overflow only, persist+overflow : Sync/Async)
- * 
- *
  */
 @Category(IntegrationTest.class)
-public class FaultingInJUnitTest extends DiskRegionTestingBase
-{
-  protected volatile boolean hasBeenNotified;
-  
-  
+public class FaultingInJUnitTest extends DiskRegionTestingBase {
+
+  private volatile boolean hasBeenNotified;
+
   private DiskRegionProperties diskProps = new DiskRegionProperties();
   
-  @Before
-  public void setUp() throws Exception {
-    super.setUp();
+  @Override
+  protected final void postSetUp() throws Exception {
     deleteFiles();
     diskProps.setDiskDirs(dirs);
     diskProps.setCompactionThreshold(100);
@@ -49,19 +44,16 @@ public class FaultingInJUnitTest extends DiskRegionTestingBase
     LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER = true;
   }
   
-  @After
-  public void tearDown() throws Exception {
+  @Override
+  protected final void preTearDown() throws Exception {
     closeDown();
     deleteFiles();
-    super.tearDown();
   }
 
   /**
    * fault in a value from the current oplog
-   *
    */
-  void faultInFromCurrentOplog()
-  { 
+  private void faultInFromCurrentOplog() {
     put100Int();
     putTillOverFlow(region);
     region.put(new Integer(200), new Integer(200));
@@ -73,10 +65,8 @@ public class FaultingInJUnitTest extends DiskRegionTestingBase
 
   /**
    * fault in a value from an old oplog
-   *
    */
-  void faultInFromOldOplog()
-  {
+  private void faultInFromOldOplog() {
     put100Int();
     putTillOverFlow(region);
     region.put(new Integer(200), new Integer(200));
@@ -89,10 +79,8 @@ public class FaultingInJUnitTest extends DiskRegionTestingBase
 
   /**
    * fault in a value that has been copied forward by compaction
-   *
    */
-  void faultInFromCompactedOplog()
-  {
+  private void faultInFromCompactedOplog() {
     put100Int();
     putTillOverFlow(region);
     region.put(new Integer(101), new Integer(101));
@@ -147,89 +135,74 @@ public class FaultingInJUnitTest extends DiskRegionTestingBase
    * test OverflowOnly Sync Faultin  From CurrentOplog
    */
   @Test
-  public void testOverflowOnlyFaultinSyncFromCurrentOplog()
-  {
+  public void testOverflowOnlyFaultinSyncFromCurrentOplog() {
     region = DiskRegionHelperFactory.getSyncOverFlowOnlyRegion(cache,diskProps);
     faultInFromCurrentOplog();
   }
 
   @Test
-  public void testOverflowOnlyFaultinSyncFromOldOplog()
-  {
+  public void testOverflowOnlyFaultinSyncFromOldOplog() {
     region = DiskRegionHelperFactory.getSyncOverFlowOnlyRegion(cache,diskProps);
     faultInFromOldOplog();
   }
 
   @Test
-  public void testOverflowOnlyFaultinSyncFromCompactedOplog()
-  {
+  public void testOverflowOnlyFaultinSyncFromCompactedOplog() {
     region = DiskRegionHelperFactory.getSyncOverFlowOnlyRegion(cache,diskProps);
     faultInFromCompactedOplog();
   }
 
   @Test
-  public void testOverflowOnlyFaultinAsyncFromCurrentOplog()
-  {
+  public void testOverflowOnlyFaultinAsyncFromCurrentOplog() {
     region = DiskRegionHelperFactory.getAsyncOverFlowOnlyRegion(cache,diskProps);
     faultInFromCurrentOplog();
   }
 
   @Test
-  public void testOverflowOnlyFaultinAsyncFromOldOplog()
-  {
+  public void testOverflowOnlyFaultinAsyncFromOldOplog() {
     region = DiskRegionHelperFactory.getAsyncOverFlowOnlyRegion(cache,diskProps);
     faultInFromOldOplog();
   }
 
   @Test
-  public void testOverflowOnlyFaultinAsyncFromCompactedOplog()
-  {
+  public void testOverflowOnlyFaultinAsyncFromCompactedOplog() {
     region = DiskRegionHelperFactory.getAsyncOverFlowOnlyRegion(cache,diskProps);
     faultInFromCompactedOplog();
   }
   
   @Test
-  public void testOverflowAndPersistFaultinSyncFromCurrentOplog()
-  {
+  public void testOverflowAndPersistFaultinSyncFromCurrentOplog() {
     region = DiskRegionHelperFactory.getSyncOverFlowAndPersistRegion(cache,diskProps);
     faultInFromCurrentOplog();
   }
 
   @Test
-  public void testOverflowAndPersistFaultinSyncFromOldOplog()
-  {
+  public void testOverflowAndPersistFaultinSyncFromOldOplog() {
     region = DiskRegionHelperFactory.getSyncOverFlowAndPersistRegion(cache,diskProps);
     faultInFromOldOplog();
   }
 
   @Test
-  public void testOverflowAndPersistFaultinSyncFromCompactedOplog()
-  {
+  public void testOverflowAndPersistFaultinSyncFromCompactedOplog() {
     region = DiskRegionHelperFactory.getSyncOverFlowAndPersistRegion(cache,diskProps);
     faultInFromCompactedOplog();
   }
 
   @Test
-  public void testOverflowAndPersistFaultinAsyncFromCurrentOplog()
-  {
+  public void testOverflowAndPersistFaultinAsyncFromCurrentOplog() {
     region = DiskRegionHelperFactory.getSyncOverFlowAndPersistRegion(cache,diskProps);
     faultInFromCurrentOplog();
   }
 
   @Test
-  public void testOverflowAndPersistFaultinAsyncFromOldOplog()
-  {
+  public void testOverflowAndPersistFaultinAsyncFromOldOplog() {
     region = DiskRegionHelperFactory.getSyncOverFlowAndPersistRegion(cache,diskProps);
     faultInFromOldOplog();
   }
 
   @Test
-  public void testOverflowAndPersistFaultinAsyncFromCompactedOplog()
-  {
+  public void testOverflowAndPersistFaultinAsyncFromCompactedOplog() {
     region = DiskRegionHelperFactory.getSyncOverFlowAndPersistRegion(cache,diskProps);
     faultInFromCompactedOplog();
   }
-
-
-  
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bb91dedc/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/MultipleOplogsRollingFeatureJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/MultipleOplogsRollingFeatureJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/MultipleOplogsRollingFeatureJUnitTest.java
index 6ac9e60..c7e5d30 100755
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/MultipleOplogsRollingFeatureJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/MultipleOplogsRollingFeatureJUnitTest.java
@@ -16,12 +16,11 @@
  */
 package com.gemstone.gemfire.internal.cache;
 
-import org.junit.After;
+import static org.junit.Assert.*;
+
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import static org.junit.Assert.*;
-
 import com.gemstone.gemfire.cache.Scope;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 
@@ -29,26 +28,25 @@ import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
  * The test will verify <br>
  * 1. Multiple oplogs are being rolled at once <br>
  * 2. The Number of entries getting logged to the HTree are taking care of creation 
- * 
  */
 @Category(IntegrationTest.class)
-public class MultipleOplogsRollingFeatureJUnitTest extends
-    DiskRegionTestingBase
-{
+public class MultipleOplogsRollingFeatureJUnitTest extends DiskRegionTestingBase {
 
-  protected Object mutex = new Object();
+  private volatile boolean FLAG = false;
 
-  protected boolean CALLBACK_SET = false;
+  private Object mutex = new Object();
 
-  protected volatile boolean FLAG = false;
+  private boolean CALLBACK_SET = false;
 
-  DiskRegionProperties diskProps = new DiskRegionProperties();
+  private DiskRegionProperties diskProps = new DiskRegionProperties();
 
-  @After
-  public void tearDown() throws Exception
-  {
+  @Override
+  protected final void preTearDown() throws Exception {
     LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER = false;
-    super.tearDown();
+  }
+
+  @Override
+  protected final void postTearDown() throws Exception {
     diskProps.setDiskDirs(dirs);
   }
 
@@ -58,8 +56,7 @@ public class MultipleOplogsRollingFeatureJUnitTest extends
    * 2. The Number of entries are properly conflated
    */
   @Test
-  public void testMultipleRolling()
-  {
+  public void testMultipleRolling() {
     System.setProperty("gemfire.MAX_OPLOGS_PER_COMPACTION", "17");
     try {
       deleteFiles();
@@ -161,8 +158,7 @@ public class MultipleOplogsRollingFeatureJUnitTest extends
     }
   }
 
-  private void waitForCompactor(long maxWaitingTime)
-  {
+  private void waitForCompactor(long maxWaitingTime) {
     long maxWaitTime = maxWaitingTime;
     long start = System.currentTimeMillis();
     while (!FLAG) { // wait until
@@ -179,8 +175,7 @@ public class MultipleOplogsRollingFeatureJUnitTest extends
     }
   }
 
-  private void addEntries(int opLogNum, int valueSize)
-  {
+  private void addEntries(int opLogNum, int valueSize) {
     assertNotNull(region);
     byte[] val = new byte[valueSize];
     for (int i = 0; i < valueSize; ++i) {
@@ -217,8 +212,7 @@ public class MultipleOplogsRollingFeatureJUnitTest extends
     }
   }
 
-  private CacheObserver getCacheObserver()
-  {
+  private CacheObserver getCacheObserver() {
     return (new CacheObserverAdapter() {
 
       public void beforeGoingToCompact()
@@ -251,6 +245,5 @@ public class MultipleOplogsRollingFeatureJUnitTest extends
 
       }
     });
-
   }
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bb91dedc/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/OplogJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/OplogJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/OplogJUnitTest.java
index d1be04d..b259b79 100755
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/OplogJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/OplogJUnitTest.java
@@ -117,17 +117,13 @@ public class OplogJUnitTest extends DiskRegionTestingBase {
   protected volatile Thread rollerThread = null;
 
   @Override
-  @Before
-  public void setUp() throws Exception {
-    super.setUp();
+  protected final void postSetUp() throws Exception {
     diskProps.setDiskDirs(dirs);
     DiskStoreImpl.SET_IGNORE_PREALLOCATE = true;
   }
 
   @Override
-  @After
-  public void tearDown() throws Exception {
-    super.tearDown();
+  protected final void postTearDown() throws Exception {
     DiskStoreImpl.SET_IGNORE_PREALLOCATE = false;
   }
 

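
The OplogJUnitTest hunks above show the lifecycle cleanup in its simplest form: instead of declaring its own @Before/@After methods and calling super, the subclass overrides postSetUp() and postTearDown(). DiskRegionTestingBase itself is not part of this message, so the following is only a sketch of the presumed template-method arrangement behind those hooks; the hook names come from the diff, but the base-class wiring is an assumption:

    import org.junit.After;
    import org.junit.Before;

    // Presumed shape of the base class; the real DiskRegionTestingBase is not shown here.
    public abstract class LifecycleHookSketch {

      @Before
      public final void setUp() throws Exception {
        preSetUp();       // subclass work before the shared setup (assumed ordering)
        // ... shared cache and disk-store setup owned by the base class ...
        postSetUp();      // subclass work after the shared setup
      }

      @After
      public final void tearDown() throws Exception {
        preTearDown();    // subclass work before the shared teardown
        // ... shared teardown owned by the base class ...
        postTearDown();   // subclass work after the shared teardown
      }

      // No-op hooks; subclasses override only the ones they need, as OplogJUnitTest
      // does above with postSetUp() and postTearDown().
      protected void preSetUp() throws Exception {}
      protected void postSetUp() throws Exception {}
      protected void preTearDown() throws Exception {}
      protected void postTearDown() throws Exception {}
    }
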
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bb91dedc/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/SimpleDiskRegionJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/SimpleDiskRegionJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/SimpleDiskRegionJUnitTest.java
index 6bac28b..bbd22fa 100755
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/SimpleDiskRegionJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/SimpleDiskRegionJUnitTest.java
@@ -22,17 +22,16 @@
  */
 package com.gemstone.gemfire.internal.cache;
 
+import static org.junit.Assert.*;
+
 import java.io.File;
 import java.util.Collections;
 import java.util.HashSet;
 import java.util.Set;
 
-import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import static org.junit.Assert.*;
-
 import com.gemstone.gemfire.StatisticsFactory;
 import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
@@ -41,19 +40,16 @@ import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
  * Testing methods for SimpleDiskRegion.java api's
  * 
  * @since 5.1
- *  
  */
 @Category(IntegrationTest.class)
-public class SimpleDiskRegionJUnitTest extends DiskRegionTestingBase
-{
+public class SimpleDiskRegionJUnitTest extends DiskRegionTestingBase {
 
-  protected Set keyIds = Collections.synchronizedSet(new HashSet());
+  private Set keyIds = Collections.synchronizedSet(new HashSet());
 
   private DiskRegionProperties diskProps = new DiskRegionProperties();
 
-  @Before
-  public void setUp() throws Exception {
-    super.setUp();
+  @Override
+  protected final void postSetUp() throws Exception {
     diskProps.setDiskDirs(dirs);
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bb91dedc/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegOverflowAsyncGetInMemPerfJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegOverflowAsyncGetInMemPerfJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegOverflowAsyncGetInMemPerfJUnitTest.java
index 49e07b4..8b9bbaf 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegOverflowAsyncGetInMemPerfJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegOverflowAsyncGetInMemPerfJUnitTest.java
@@ -16,41 +16,32 @@
  */
 package com.gemstone.gemfire.internal.cache.diskPerf;
 
-import java.util.*;
+import java.util.Arrays;
 
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import static org.junit.Assert.*;
-
-import com.gemstone.gemfire.*;
+import com.gemstone.gemfire.LogWriter;
 import com.gemstone.gemfire.internal.cache.DiskRegionHelperFactory;
 import com.gemstone.gemfire.internal.cache.DiskRegionProperties;
 import com.gemstone.gemfire.internal.cache.DiskRegionTestingBase;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 
 /**
- * 
  * Disk region Perf test for Overflow only with ASync writes. 1) Performance of
  * get operation for entry in memory.
- *  
  */
 @Category(IntegrationTest.class)
-public class DiskRegOverflowAsyncGetInMemPerfJUnitTest extends DiskRegionTestingBase
-{
+public class DiskRegOverflowAsyncGetInMemPerfJUnitTest extends DiskRegionTestingBase {
 
-  LogWriter log = null;
+  private static int counter = 0;
 
-  static int counter = 0;
+  private LogWriter log = null;
 
-  DiskRegionProperties diskProps = new DiskRegionProperties();
+  private DiskRegionProperties diskProps = new DiskRegionProperties();
 
-  @Before
-  public void setUp() throws Exception
-  {
-    super.setUp();
+  @Override
+  protected final void postSetUp() throws Exception {
     diskProps.setDiskDirs(dirs);
 //    Properties properties = new Properties();
     diskProps.setBytesThreshold(10000l);
@@ -61,10 +52,8 @@ public class DiskRegOverflowAsyncGetInMemPerfJUnitTest extends DiskRegionTesting
     log = ds.getLogWriter();
   }
 
-  @After
-  public void tearDown() throws Exception
-  {
-    super.tearDown();
+  @Override
+  protected final void postTearDown() throws Exception {
     if (cache != null) {
       cache.close();
     }
@@ -73,21 +62,17 @@ public class DiskRegOverflowAsyncGetInMemPerfJUnitTest extends DiskRegionTesting
     }
   }
 
- 
   private static int ENTRY_SIZE = 1024;
   
   /* OP_COUNT can be increased/decrease as per the requirement.
    * If required to be set as higher value such as 1000000
    * one needs to set the VM heap size accordingly.
    * (For example:Default setting in build.xml is <jvmarg value="-Xmx256M"/>
-   *    
    */
-  
   private static int OP_COUNT = 1000;
 
   @Test
-  public void testPopulatefor1Kbwrites()
-  {
+  public void testPopulatefor1Kbwrites() {
 //    RegionAttributes ra = region.getAttributes();
 //    final String key = "K";
     final byte[] value = new byte[ENTRY_SIZE];
@@ -129,8 +114,5 @@ public class DiskRegOverflowAsyncGetInMemPerfJUnitTest extends DiskRegionTesting
         + " bytes/sec=" + bytesPerSecGet;
     log.info(statsGet);
     System.out.println("Perf Stats of get which is in memory :" + statsGet);
-
   }
-
 }
-

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bb91dedc/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegOverflowAsyncJUnitPerformanceTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegOverflowAsyncJUnitPerformanceTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegOverflowAsyncJUnitPerformanceTest.java
index dcb6af1..be87bc1 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegOverflowAsyncJUnitPerformanceTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegOverflowAsyncJUnitPerformanceTest.java
@@ -16,12 +16,10 @@
  */
 package com.gemstone.gemfire.internal.cache.diskPerf;
 
-import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.*;
 
 import java.util.Arrays;
 
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
@@ -40,17 +38,24 @@ import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
  * which will fault in.
  */
 @Category(IntegrationTest.class)
-public class DiskRegOverflowAsyncJUnitPerformanceTest extends DiskRegionTestingBase
-{
+public class DiskRegOverflowAsyncJUnitPerformanceTest extends DiskRegionTestingBase {
 
-  LogWriter log = null;
+  private static int ENTRY_SIZE = 1024 * 5;
+
+  /**
+   * Do not change the value OP_COUNT = 400
+   * The test case is dependent on this value.
+   */
+  private static int OP_COUNT = 400;
+
+  private static int HALF_OP_COUNT = OP_COUNT / 2;
+
+  private LogWriter log = null;
 
-  DiskRegionProperties diskProps = new DiskRegionProperties();
+  private DiskRegionProperties diskProps = new DiskRegionProperties();
 
-  @Before
-  public void setUp() throws Exception
-  {
-    super.setUp();
+  @Override
+  protected final void postSetUp() throws Exception {
     diskProps.setDiskDirs(dirs);
     diskProps.setTimeInterval(1000l);
     diskProps.setBytesThreshold(10000l);
@@ -58,13 +63,10 @@ public class DiskRegOverflowAsyncJUnitPerformanceTest extends DiskRegionTestingB
     region = DiskRegionHelperFactory.getAsyncOverFlowOnlyRegion(cache,
         diskProps);
     log = ds.getLogWriter();
-
   }
 
-  @After
-  public void tearDown() throws Exception
-  {
-    super.tearDown();
+  @Override
+  protected final void postTearDown() throws Exception {
     if (cache != null) {
       cache.close();
     }
@@ -73,20 +75,8 @@ public class DiskRegOverflowAsyncJUnitPerformanceTest extends DiskRegionTestingB
     }
   }
 
- 
-  private static int ENTRY_SIZE = 1024 * 5;
-  
- /* Do not change the value OP_COUNT = 400
-  * The test case is dependent on this value.
-  */
-  
-  private static int OP_COUNT = 400;
-
-  private static int HALF_OP_COUNT = OP_COUNT / 2;
-
   @Test
-  public void testPopulatefor5Kbwrites()
-  {
+  public void testPopulatefor5Kbwrites() {
 //    RegionAttributes ra = region.getAttributes();
     LRUStatistics lruStats = getLRUStats(region);
     // Put in larger stuff until we start evicting
@@ -153,15 +143,10 @@ public class DiskRegOverflowAsyncJUnitPerformanceTest extends DiskRegionTestingB
         + " bytes/sec=" + bytesPerSecGet;
     log.info(statsGet);
     System.out.println("Perf Stats of get which is fauting in :" + statsGet);
-
   }
 
-  protected LRUStatistics getLRUStats(Region region)
-  {
+  private LRUStatistics getLRUStats(Region region) {
     return ((LocalRegion)region).getEvictionController().getLRUHelper()
         .getStats();
-
   }
-
 }
-

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bb91dedc/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegOverflowSyncGetInMemPerfJUnitPerformanceTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegOverflowSyncGetInMemPerfJUnitPerformanceTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegOverflowSyncGetInMemPerfJUnitPerformanceTest.java
index 95a456c..223f04e 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegOverflowSyncGetInMemPerfJUnitPerformanceTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegOverflowSyncGetInMemPerfJUnitPerformanceTest.java
@@ -16,16 +16,12 @@
  */
 package com.gemstone.gemfire.internal.cache.diskPerf;
 
-import java.util.*;
+import java.util.Arrays;
 
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import static org.junit.Assert.*;
-
-import com.gemstone.gemfire.*;
+import com.gemstone.gemfire.LogWriter;
 import com.gemstone.gemfire.internal.cache.DiskRegionHelperFactory;
 import com.gemstone.gemfire.internal.cache.DiskRegionProperties;
 import com.gemstone.gemfire.internal.cache.DiskRegionTestingBase;
@@ -34,35 +30,36 @@ import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 /**
  * Disk region Perf test for Overflow only with Sync writes. 1) Performance of
  * get operation for entry in memory.
- *  
  */
 @Category(IntegrationTest.class)
-public class DiskRegOverflowSyncGetInMemPerfJUnitPerformanceTest extends DiskRegionTestingBase
-{
+public class DiskRegOverflowSyncGetInMemPerfJUnitPerformanceTest extends DiskRegionTestingBase {
+
+  private static int ENTRY_SIZE = 1024;
+
+  private static int OP_COUNT = 10000;
 
-  LogWriter log = null;
+  private static int counter = 0;
 
-  static int counter = 0;
+  private LogWriter log = null;
 
-  DiskRegionProperties diskProps = new DiskRegionProperties();
+  private DiskRegionProperties diskProps = new DiskRegionProperties();
 
-  @Before
-  public void setUp() throws Exception
-  {
+  @Override
+  protected final void preSetUp() throws Exception {
     diskProps.setDiskDirs(dirs);
-    super.setUp();
+  }
 
+  @Override
+  protected final void postSetUp() throws Exception {
     diskProps.setOverFlowCapacity(100000);
     region = DiskRegionHelperFactory
-        .getSyncOverFlowOnlyRegion(cache, diskProps);
-    
+      .getSyncOverFlowOnlyRegion(cache, diskProps);
+
     log = ds.getLogWriter();
   }
 
-  @After
-  public void tearDown() throws Exception
-  {
-    super.tearDown();
+  @Override
+  protected final void postTearDown() throws Exception {
     if (cache != null) {
       cache.close();
     }
@@ -71,15 +68,8 @@ public class DiskRegOverflowSyncGetInMemPerfJUnitPerformanceTest extends DiskReg
     }
   }
 
-  
-
-  private static int ENTRY_SIZE = 1024;
-
-  private static int OP_COUNT = 10000;
-
   @Test
-  public void testPopulatefor1Kbwrites()
-  {
+  public void testPopulatefor1Kbwrites() {
 //    RegionAttributes ra = region.getAttributes();
 //    final String key = "K";
     final byte[] value = new byte[ENTRY_SIZE];
@@ -122,8 +112,5 @@ public class DiskRegOverflowSyncGetInMemPerfJUnitPerformanceTest extends DiskReg
         + " bytes/sec=" + bytesPerSecGet;
     log.info(statsGet);
     System.out.println("Perf Stats of get which is in memory :" + statsGet);
-
   }
-
 }
-

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bb91dedc/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegOverflowSyncJUnitPerformanceTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegOverflowSyncJUnitPerformanceTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegOverflowSyncJUnitPerformanceTest.java
index 208b39f..945a3e5 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegOverflowSyncJUnitPerformanceTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegOverflowSyncJUnitPerformanceTest.java
@@ -16,22 +16,20 @@
  */
 package com.gemstone.gemfire.internal.cache.diskPerf;
 
-import java.util.*;
+import static org.junit.Assert.*;
+
+import java.util.Arrays;
 
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import static org.junit.Assert.*;
-
-import com.gemstone.gemfire.*;
-import com.gemstone.gemfire.cache.*;
+import com.gemstone.gemfire.LogWriter;
+import com.gemstone.gemfire.cache.Region;
 import com.gemstone.gemfire.internal.cache.DiskRegionHelperFactory;
 import com.gemstone.gemfire.internal.cache.DiskRegionProperties;
 import com.gemstone.gemfire.internal.cache.DiskRegionTestingBase;
+import com.gemstone.gemfire.internal.cache.LocalRegion;
 import com.gemstone.gemfire.internal.cache.lru.LRUStatistics;
-import com.gemstone.gemfire.internal.cache.*;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 
 /**
@@ -40,19 +38,26 @@ import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
  * which will fault in.
  */
 @Category(IntegrationTest.class)
-public class DiskRegOverflowSyncJUnitPerformanceTest extends DiskRegionTestingBase
-{
+public class DiskRegOverflowSyncJUnitPerformanceTest extends DiskRegionTestingBase {
+
+  private static int ENTRY_SIZE = 1024 * 5;
+
+  /**
+   * Do not change the value OP_COUNT = 400
+   * The test case is dependent on this value.
+   */
+  private static int OP_COUNT = 400;
+
+  private static int HALF_OP_COUNT = OP_COUNT / 2;
 
-  LogWriter log = null;
+  private static int counter = 0;
 
-  static int counter = 0;
+  private LogWriter log = null;
 
-  DiskRegionProperties diskProps = new DiskRegionProperties();
+  private DiskRegionProperties diskProps = new DiskRegionProperties();
 
-  @Before
-  public void setUp() throws Exception
-  {
-    super.setUp();
+  @Override
+  protected final void postSetUp() throws Exception {
     diskProps.setDiskDirs(dirs);
     diskProps.setOverFlowCapacity(1000);
     region = DiskRegionHelperFactory
@@ -61,10 +66,8 @@ public class DiskRegOverflowSyncJUnitPerformanceTest extends DiskRegionTestingBa
     log = ds.getLogWriter();
   }
 
-  @After
-  public void tearDown() throws Exception
-  {
-    super.tearDown();
+  @Override
+  protected final void postTearDown() throws Exception {
     if (cache != null) {
       cache.close();
     }
@@ -73,20 +76,8 @@ public class DiskRegOverflowSyncJUnitPerformanceTest extends DiskRegionTestingBa
     }
   }
 
- 
-  private static int ENTRY_SIZE = 1024 * 5;
-
-  /* Do not change the value OP_COUNT = 400
-   * The test case is dependent on this value.
-   */
-  
-  private static int OP_COUNT = 400;
-
-  private static int HALF_OP_COUNT = OP_COUNT / 2;
-
   @Test
-  public void testPopulatefor5Kbwrites()
-  {
+  public void testPopulatefor5Kbwrites() throws Exception {
 //    RegionAttributes ra = region.getAttributes();
 
     LRUStatistics lruStats = getLRUStats(region);
@@ -155,15 +146,11 @@ public class DiskRegOverflowSyncJUnitPerformanceTest extends DiskRegionTestingBa
         + " bytes/sec=" + bytesPerSecGet;
     log.info(statsGet);
     System.out.println("Perf Stats of get which is fauting in :" + statsGet);
-
   }
 
-  protected LRUStatistics getLRUStats(Region region)
-  {
+  private LRUStatistics getLRUStats(Region region) {
     return ((LocalRegion)region).getEvictionController().getLRUHelper()
         .getStats();
-
   }
 
 }
-

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bb91dedc/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionOverflowAsyncRollingOpLogJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionOverflowAsyncRollingOpLogJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionOverflowAsyncRollingOpLogJUnitTest.java
index b014cb2..296ae00 100755
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionOverflowAsyncRollingOpLogJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionOverflowAsyncRollingOpLogJUnitTest.java
@@ -55,10 +55,8 @@ public class DiskRegionOverflowAsyncRollingOpLogJUnitTest extends
 
   DiskRegionProperties diskProps = new DiskRegionProperties();
 
-  @Before
-  public void setUp() throws Exception
-  {
-    super.setUp();
+  @Override
+  protected final void postSetUp() throws Exception {
     diskProps.setRegionName("OverflowAsyncRollingOpLogRegion");
     diskProps.setDiskDirs(dirs);
     this.log = ds.getLogWriter();
@@ -69,13 +67,10 @@ public class DiskRegionOverflowAsyncRollingOpLogJUnitTest extends
     diskProps.setMaxOplogSize(10485760l);
     region = DiskRegionHelperFactory.getAsyncOverFlowOnlyRegion(cache,
         diskProps);
-
   }
 
   @After
-  public void tearDown() throws Exception
-  {
-    super.tearDown();
+  protected final void postTearDown() throws Exception {
     if (cache != null) {
       cache.close();
     }
@@ -84,8 +79,6 @@ public class DiskRegionOverflowAsyncRollingOpLogJUnitTest extends
     }
   }
 
-  
-
   @Test
   public void testGetPerfRollingOpog()
   {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bb91dedc/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionOverflowSyncRollingOpLogJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionOverflowSyncRollingOpLogJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionOverflowSyncRollingOpLogJUnitTest.java
index d94cf73..da0bb5e 100755
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionOverflowSyncRollingOpLogJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionOverflowSyncRollingOpLogJUnitTest.java
@@ -51,10 +51,8 @@ public class DiskRegionOverflowSyncRollingOpLogJUnitTest extends
 
   DiskRegionProperties diskProps = new DiskRegionProperties();
 
-  @Before
-  public void setUp() throws Exception
-  {
-    super.setUp();
+  @Override
+  protected final void postSetUp() throws Exception {
     diskProps.setDiskDirs(dirs);
     this.log = ds.getLogWriter();
     diskProps.setRolling(true);
@@ -62,16 +60,8 @@ public class DiskRegionOverflowSyncRollingOpLogJUnitTest extends
     diskProps.setCompactionThreshold(100);
     region = DiskRegionHelperFactory
         .getSyncOverFlowOnlyRegion(cache, diskProps);
-
   }
 
-  @After
-  public void tearDown() throws Exception
-  {
-    super.tearDown();
-    
-  }
-  
   @Test
   public void testGetPerfRollingOpog()
   {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bb91dedc/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionPerfJUnitPerformanceTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionPerfJUnitPerformanceTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionPerfJUnitPerformanceTest.java
index 5ec4af8..feef5c7 100755
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionPerfJUnitPerformanceTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionPerfJUnitPerformanceTest.java
@@ -16,17 +16,15 @@
  */
 package com.gemstone.gemfire.internal.cache.diskPerf;
 
+import static org.junit.Assert.*;
+
 import java.io.File;
 import java.util.Arrays;
 
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import static org.junit.Assert.*;
-
 import com.gemstone.gemfire.LogWriter;
 import com.gemstone.gemfire.cache.Scope;
 import com.gemstone.gemfire.internal.cache.DiskRegionHelperFactory;
@@ -37,43 +35,28 @@ import com.gemstone.gemfire.test.junit.categories.PerformanceTest;
 /**
  * Consolidated Disk Region Perftest. Overflow, Persist, OverflowWithPersist
  * modes are tested for Sync, AsyncWithBuffer and AsyncWithoutBuffer writes.
- *  
  */
 @Category(PerformanceTest.class)
 @Ignore("Tests have no assertions")
-public class DiskRegionPerfJUnitPerformanceTest extends DiskRegionTestingBase
-{
-  LogWriter log = null;
+public class DiskRegionPerfJUnitPerformanceTest extends DiskRegionTestingBase {
 
-  String stats = null;
+  private static int counter = 0;
 
-  String stats_ForSameKeyputs = null;
+  private LogWriter log = null;
 
-  static int counter = 0;
+  private String stats = null;
 
-  // protected static File[] dirs = null;
+  private String stats_ForSameKeyputs = null;
 
-  DiskRegionProperties diskProps = new DiskRegionProperties();
+  private DiskRegionProperties diskProps = new DiskRegionProperties();
 
-  @Before
-  public void setUp() throws Exception
-  {
-    super.setUp();
+  @Override
+  protected final void postSetUp() throws Exception {
     diskProps.setDiskDirs(dirs);
   }
 
-  @After
-  public void tearDown() throws Exception
-  {
-    super.tearDown();
-
-  }
-
-  //*********Test Cases **************
-  //********Overflowonly tests *********
   @Test
-  public void testOverflowSync1()
-  {
+  public void testOverflowSync1() throws Exception {
     try {
       //Create four Dirs for Disk Dirs
       File file1 = new File("testOverflowSync1Dir1");
@@ -116,8 +99,7 @@ public class DiskRegionPerfJUnitPerformanceTest extends DiskRegionTestingBase
   } //end of testOverflowSync1
 
   @Test
-  public void testOverflowASyncWithBuffer2()
-  {
+  public void testOverflowASyncWithBuffer2() {
     try {
       //    Create four Dirs for Disk Dirs
       File file1 = new File("testOverflowASyncWithBuffer2Dir1");
@@ -161,8 +143,7 @@ public class DiskRegionPerfJUnitPerformanceTest extends DiskRegionTestingBase
      } //end of testOverflowASyncWithBuffer2
 
   @Test
-  public void testOverflowASyncWithoutBuffer3()
-  {
+  public void testOverflowASyncWithoutBuffer3() {
     try {
       //    Create four Dirs for Disk Dirs
       File file1 = new File("testOverflowASyncWithoutBuffer3Dir1");
@@ -207,10 +188,8 @@ public class DiskRegionPerfJUnitPerformanceTest extends DiskRegionTestingBase
     
   } //end of testOverflowASyncWithoutBuffer3
 
-  //******** PersistOnly Tests ****************
   @Test
-  public void testpersistSync4()
-  {
+  public void testpersistSync4() {
     try {
       //    Create four Dirs for Disk Dirs
       File file1 = new File("testpersistSync4Dir1");
@@ -252,8 +231,7 @@ public class DiskRegionPerfJUnitPerformanceTest extends DiskRegionTestingBase
   } //end of testPersistSync4
 
   @Test
-  public void testpersistASyncWithBuffer5()
-  {
+  public void testpersistASyncWithBuffer5() {
     try {
       //    Create four Dirs for Disk Dirs
       File file1 = new File("testpersistASyncWithBuffer5Dir1");
@@ -298,8 +276,7 @@ public class DiskRegionPerfJUnitPerformanceTest extends DiskRegionTestingBase
   } //end of testPersistASyncWithBuffer5
 
   @Test
-  public void testPersistASyncWithoutBuffer6()
-  {
+  public void testPersistASyncWithoutBuffer6() {
     try {
       //    Create four Dirs for Disk Dirs
       File file1 = new File("testPersistASyncWithoutBuffer6Dir1");
@@ -345,10 +322,8 @@ public class DiskRegionPerfJUnitPerformanceTest extends DiskRegionTestingBase
     
   } //end of testPersistASyncWithoutBuffer
 
-  //*************Persist with Overflow tests ****************
   @Test
-  public void testPersistOverflowSync7()
-  {
+  public void testPersistOverflowSync7() {
     try {
       //    Create four Dirs for Disk Dirs
       File file1 = new File("testPersistOverflowSync7Dir1");
@@ -392,8 +367,7 @@ public class DiskRegionPerfJUnitPerformanceTest extends DiskRegionTestingBase
   } //end of testPersistOverflowSync
 
   @Test
-  public void testPersistOverflowASyncWithBuffer8()
-  {
+  public void testPersistOverflowASyncWithBuffer8() {
     try {
       //    Create four Dirs for Disk Dirs
       File file1 = new File("testPersistOverflowASyncWithBuffer8Dir1");
@@ -440,8 +414,7 @@ public class DiskRegionPerfJUnitPerformanceTest extends DiskRegionTestingBase
   } //end of testpersistOverflowASyncWithBuffer8
 
   @Test
-  public void testPersistOverflowASyncWithoutBuffer9()
-  {
+  public void testPersistOverflowASyncWithoutBuffer9() {
     try {
       //    Create four Dirs for Disk Dirs
       File file1 = new File("testPersistOverflowASyncWithoutBuffer9Dir1");
@@ -487,7 +460,6 @@ public class DiskRegionPerfJUnitPerformanceTest extends DiskRegionTestingBase
     
   } //end of testPersistOverflowASyncWithoutBuffer9
 
-  //************** test data population *******************
   public static int ENTRY_SIZE = 1024;
   
   /**
@@ -495,15 +467,12 @@ public class DiskRegionPerfJUnitPerformanceTest extends DiskRegionTestingBase
    * If required to be set as higher value such as 1000000
    * one needs to set the VM heap size accordingly.
    * (For example:Default setting in build.xml is <jvmarg value="-Xmx256M"/>
-   *    
    */
-  
   public static int OP_COUNT = 100;
 
   public static boolean UNIQUE_KEYS = Boolean.getBoolean("DRP.UNIQUE_KEYS");
 
-  public void populateData()
-  {
+  public void populateData() {
     //Put for validation.
     putForValidation(region);
     
@@ -530,8 +499,7 @@ public class DiskRegionPerfJUnitPerformanceTest extends DiskRegionTestingBase
     log.info(stats);
   }
 
-  public void populateDataPutOnSameKey()
-  {
+  public void populateDataPutOnSameKey() {
 //  Put for validation.
     putForValidation(region);
     final byte[] value = new byte[ENTRY_SIZE];
@@ -556,15 +524,13 @@ public class DiskRegionPerfJUnitPerformanceTest extends DiskRegionTestingBase
     log.info(stats_ForSameKeyputs);
   }
 
-  protected static void deleteFiles()
-  {
+  protected static void deleteFiles() {
     for (int i = 0; i < 4; i++) {
       File[] files = dirs[i].listFiles();
       for (int j = 0; j < files.length; j++) {
         files[j].delete();
       }
     }
-
   }
 
 }// end of DiskRegionPerfJUnitTest

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bb91dedc/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionPersistOnlySyncJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionPersistOnlySyncJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionPersistOnlySyncJUnitTest.java
index 6dd9867..14b0197 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionPersistOnlySyncJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionPersistOnlySyncJUnitTest.java
@@ -16,13 +16,10 @@
  */
 package com.gemstone.gemfire.internal.cache.diskPerf;
 
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.Assert.*;
 
 import java.util.Arrays;
 
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
@@ -37,20 +34,16 @@ import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 
 /**
  * Disk region perf test for Persist only with sync writes.
- *  
  */
 @Category(IntegrationTest.class)
-public class DiskRegionPersistOnlySyncJUnitTest extends DiskRegionTestingBase
-{
+public class DiskRegionPersistOnlySyncJUnitTest extends DiskRegionTestingBase {
 
-  LogWriter log = null;
+  private LogWriter log = null;
 
-  DiskRegionProperties diskProps = new DiskRegionProperties();
+  private DiskRegionProperties diskProps = new DiskRegionProperties();
 
-  @Before
-  public void setUp() throws Exception
-  {
-    super.setUp();
+  @Override
+  protected final void postSetUp() throws Exception {
     diskProps.setDiskDirs(dirs);
     diskProps.setPersistBackup(true);
     region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, diskProps, Scope.LOCAL);
@@ -58,29 +51,19 @@ public class DiskRegionPersistOnlySyncJUnitTest extends DiskRegionTestingBase
     log = ds.getLogWriter();
   }
 
-  @After
-  public void tearDown() throws Exception
-  {
-    super.tearDown();
-  }
-
-  
-
   private static int ENTRY_SIZE = 1024;
   
   /* OP_COUNT can be increased/decrease as per the requirement.
    * If required to be set as higher value such as 1000000
    * one needs to set the VM heap size accordingly.
    * (For example:Default setting in build.xml is <jvmarg value="-Xmx256M"/>
-   *    
    */
   private static int OP_COUNT = 1000;
 
   private static boolean UNIQUE_KEYS = Boolean.getBoolean("DRP.UNIQUE_KEYS");
 
   @Test
-  public void testPopulate1kbwrites()
-  {
+  public void testPopulate1kbwrites() {
     RegionAttributes ra = region.getAttributes();
 //    final String key = "K";
     final byte[] value = new byte[ENTRY_SIZE];
@@ -172,8 +155,7 @@ public class DiskRegionPersistOnlySyncJUnitTest extends DiskRegionTestingBase
   }
 
   @Test
-  public void testPopulate5kbwrites()
-  {
+  public void testPopulate5kbwrites() {
     ENTRY_SIZE = 1024 * 5;
     
     /* OP_COUNT can be increased/decrease as per the requirement.

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bb91dedc/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionRollOpLogJUnitPerformanceTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionRollOpLogJUnitPerformanceTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionRollOpLogJUnitPerformanceTest.java
index 1c06543..10a6f20 100755
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionRollOpLogJUnitPerformanceTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionRollOpLogJUnitPerformanceTest.java
@@ -16,66 +16,52 @@
  */
 package com.gemstone.gemfire.internal.cache.diskPerf;
 
-import java.util.*;
+import static org.junit.Assert.*;
+
 import java.io.File;
+import java.util.Arrays;
 
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import static org.junit.Assert.*;
-
-import com.gemstone.gemfire.*;
+import com.gemstone.gemfire.LogWriter;
 import com.gemstone.gemfire.cache.Scope;
-import com.gemstone.gemfire.internal.cache.*;
+import com.gemstone.gemfire.internal.cache.DiskRegionHelperFactory;
+import com.gemstone.gemfire.internal.cache.DiskRegionProperties;
+import com.gemstone.gemfire.internal.cache.DiskRegionTestingBase;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 
 /**
  * Consolidated Disk Region Perftest. Overflow, Persist, OverflowWithPersist
  * modes are tested for Sync, AsyncWithBuffer and AsyncWithoutBuffer writes.
  * Rolling oplog is set to true with maxOplogSize = 20 mb
- *  
  */
 @Category(IntegrationTest.class)
-public class DiskRegionRollOpLogJUnitPerformanceTest extends DiskRegionTestingBase
-{
+public class DiskRegionRollOpLogJUnitPerformanceTest extends DiskRegionTestingBase {
 
-  DiskRegionProperties diskProps = new DiskRegionProperties();
+  private DiskRegionProperties diskProps = new DiskRegionProperties();
 
-  LogWriter log = null;
+  private LogWriter log = null;
 
-  String stats = null;
+  private String stats = null;
 
-  String stats_ForSameKeyputs = null;
+  private String stats_ForSameKeyputs = null;
 
   /**
    * To run DiskRegionRollOpLogPerfJUnitTest to produce the Perf numbers set
    * runPerfTest to true. Also ,one needs to set the VM heap size accordingly.
    * (For example:Default setting in build.xml is <jvmarg value="-Xmx256M"/>
    */
-  boolean runPerfTest = false;
+  private boolean runPerfTest = false;
 
-  @Before
-  public void setUp() throws Exception
-  {
-    super.setUp();
+  @Override
+  protected final void postSetUp() throws Exception {
     diskProps.setDiskDirs(dirs);
     log = ds.getLogWriter();
   }
 
-  @After
-  public void tearDown() throws Exception
-  {
-    super.tearDown();
-
-  }
-
-  //*********Test Cases **************
-  //********Overflowonly tests *********
   @Test
-  public void testOverflowSyncRollOlg1()
-  {
+  public void testOverflowSyncRollOlg1() {
     try {
       //Create four Dirs for Disk Dirs
       File file1 = new File("testOverflowSyncRollOlg1Dir1");
@@ -124,8 +110,7 @@ public class DiskRegionRollOpLogJUnitPerformanceTest extends DiskRegionTestingBa
   } //end of testOverflowSync1
 
   @Test
-  public void testOverflowASyncWithBufferRollOlg2()
-  {
+  public void testOverflowASyncWithBufferRollOlg2() {
     try {
       //    Create four Dirs for Disk Dirs
       File file1 = new File("testOverflowASyncWithBufferRollOlg2Dir1");
@@ -177,8 +162,7 @@ public class DiskRegionRollOpLogJUnitPerformanceTest extends DiskRegionTestingBa
   } //end of testOverflowASyncWithBuffer2
 
   @Test
-  public void testOverflowASyncWithoutBufferRollOlg3()
-  {
+  public void testOverflowASyncWithoutBufferRollOlg3() {
     try {
       //    Create four Dirs for Disk Dirs
       File file1 = new File("testOverflowASyncWithoutBufferRollOlg3Dir1");
@@ -229,10 +213,8 @@ public class DiskRegionRollOpLogJUnitPerformanceTest extends DiskRegionTestingBa
     deleteFiles();
   } //end of testOverflowASyncWithoutBuffer3
 
-  //******** PersistOnly Tests ****************
   @Test
-  public void testpersistSyncRollOlg4()
-  {
+  public void testpersistSyncRollOlg4() {
     try {
       //    Create four Dirs for Disk Dirs
       File file1 = new File("testpersistSyncRollOlg4Dir1");
@@ -281,8 +263,7 @@ public class DiskRegionRollOpLogJUnitPerformanceTest extends DiskRegionTestingBa
   } //end of testPersistSync4
 
   @Test
-  public void testpersistASyncWithBufferRollOlg5()
-  {
+  public void testpersistASyncWithBufferRollOlg5() {
     try {
       //    Create four Dirs for Disk Dirs
       File file1 = new File("testpersistASyncWithBufferRollOlg5Dir1");
@@ -334,8 +315,7 @@ public class DiskRegionRollOpLogJUnitPerformanceTest extends DiskRegionTestingBa
   } //end of testPersistASyncWithBuffer5
 
   @Test
-  public void testPersistASyncWithoutBufferRollOlg6()
-  {
+  public void testPersistASyncWithoutBufferRollOlg6() {
     try {
       //    Create four Dirs for Disk Dirs
       File file1 = new File("testPersistASyncWithoutBufferRollOlg6Dir1");
@@ -386,10 +366,8 @@ public class DiskRegionRollOpLogJUnitPerformanceTest extends DiskRegionTestingBa
     closeDown();
   } //end of testPersistASyncWithoutBuffer
 
-  //*************Persist with Overflow tests ****************
   @Test
-  public void testPersistOverflowSyncRollOlg7()
-  {
+  public void testPersistOverflowSyncRollOlg7() {
     try {
       //    Create four Dirs for Disk Dirs
       File file1 = new File("testPersistOverflowSyncRollOlg7Dir1");
@@ -440,8 +418,7 @@ public class DiskRegionRollOpLogJUnitPerformanceTest extends DiskRegionTestingBa
   } //end of testPersistOverflowSync
 
   @Test
-  public void testPersistOverflowASyncWithBufferRollOlg8()
-  {
+  public void testPersistOverflowASyncWithBufferRollOlg8() {
     try {
       //    Create four Dirs for Disk Dirs
       File file1 = new File("testPersistOverflowASyncWithBufferRollOlg8Dir1");
@@ -494,8 +471,7 @@ public class DiskRegionRollOpLogJUnitPerformanceTest extends DiskRegionTestingBa
   } //end of testpersistOverflowASyncWithBuffer8
 
   @Test
-  public void testPersistOverflowASyncWithoutBufferRollOlg9()
-  {
+  public void testPersistOverflowASyncWithoutBufferRollOlg9() {
     try {
       //    Create four Dirs for Disk Dirs
       File file1 = new File("testPersistOverflowASyncWithoutBufferRollOlg9Dir1");
@@ -547,7 +523,6 @@ public class DiskRegionRollOpLogJUnitPerformanceTest extends DiskRegionTestingBa
     closeDown();
   } //end of testPersistOverflowASyncWithoutBuffer9
 
-  //************** test data population *******************
   public static int ENTRY_SIZE = 1024;
 
   /**
@@ -555,15 +530,12 @@ public class DiskRegionRollOpLogJUnitPerformanceTest extends DiskRegionTestingBa
    * be set as higher value such as 1000000, one needs to set the VM heap size
    * accordingly. (For example:Default setting in build.xml is <jvmarg
    * value="-Xmx256M"/>
-   *  
    */
-
   public static int OP_COUNT = 1000;
 
   public static boolean UNIQUE_KEYS = Boolean.getBoolean("DRP.UNIQUE_KEYS");
 
-  public void populateData0to60k()
-  {
+  public void populateData0to60k() {
     final byte[] value = new byte[ENTRY_SIZE];
     Arrays.fill(value, (byte)77);
     for (int i = 0; i < 60000; i++) {
@@ -571,11 +543,9 @@ public class DiskRegionRollOpLogJUnitPerformanceTest extends DiskRegionTestingBa
       // System.out.println(i);
     }
     System.out.println(" done with putting first 60k entries");
-
   }
 
-  public void populateData60kto100k()
-  {
+  public void populateData60kto100k() {
     //  Put for validation.
     putForValidation(region);
     final byte[] value = new byte[ENTRY_SIZE];
@@ -599,8 +569,7 @@ public class DiskRegionRollOpLogJUnitPerformanceTest extends DiskRegionTestingBa
     log.info(stats);
   }
 
-  public void populateDataPutOnSameKey()
-  {
+  public void populateDataPutOnSameKey() {
     //  Put for validation.
     putForValidation(region);
     final byte[] value = new byte[ENTRY_SIZE];
@@ -624,15 +593,12 @@ public class DiskRegionRollOpLogJUnitPerformanceTest extends DiskRegionTestingBa
     log.info(stats_ForSameKeyputs);
   }
 
-  protected static void deleteFiles()
-  {
+  protected static void deleteFiles() {
     for (int i = 0; i < 4; i++) {
       File[] files = dirs[i].listFiles();
       for (int j = 0; j < files.length; j++) {
         files[j].delete();
       }
     }
-
   }
-
 }// end of DiskRegionRollOpLogPerfJUnitTest

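
The next file, HARegionQueueDUnitTest, also migrates a JUnit 3 style DUnit test to JUnit 4: the String-name constructor is removed, the superclass changes from DistributedTestCase to JUnit4DistributedTestCase, and each test method gains an explicit @Test annotation. A minimal before/after sketch of that shape, using a hypothetical FooDUnitTest rather than the real class:

    import org.junit.Test;

    import com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase;

    // Hypothetical example; only the structural changes mirror this commit.
    public class FooDUnitTest extends JUnit4DistributedTestCase {

      // JUnit 3 style: a String-name constructor was required and tests were
      // discovered by the "test" name prefix:
      //
      //   public FooDUnitTest(String name) {
      //     super(name);
      //   }
      //
      //   public void testSomething() { ... }

      // JUnit 4 style: default construction, discovery via the @Test annotation,
      // and the method is free to declare checked exceptions.
      @Test
      public void testSomething() throws Exception {
        // test body unchanged by the migration
      }
    }
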
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bb91dedc/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HARegionQueueDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HARegionQueueDUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HARegionQueueDUnitTest.java
index 6c572b1..61c9b45 100755
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HARegionQueueDUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HARegionQueueDUnitTest.java
@@ -16,6 +16,8 @@
  */
 package com.gemstone.gemfire.internal.cache.ha;
 
+import static com.gemstone.gemfire.test.dunit.Assert.*;
+
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
@@ -26,8 +28,6 @@ import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import junit.framework.Assert;
-
 import com.gemstone.gemfire.LogWriter;
 import com.gemstone.gemfire.cache.AttributesFactory;
 import com.gemstone.gemfire.cache.Cache;
@@ -45,39 +45,27 @@ import com.gemstone.gemfire.internal.cache.EntryEventImpl;
 import com.gemstone.gemfire.internal.cache.EventID;
 import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
 import com.gemstone.gemfire.internal.cache.HARegion;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 import com.gemstone.gemfire.test.dunit.Wait;
 import com.gemstone.gemfire.test.dunit.WaitCriterion;
+import com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase;
 import com.gemstone.gemfire.test.junit.categories.FlakyTest;
 
-public class HARegionQueueDUnitTest extends DistributedTestCase {
-  VM vm0 = null;
-
-  VM vm1 = null;
-
-  VM vm3 = null;
+public class HARegionQueueDUnitTest extends JUnit4DistributedTestCase {
 
-  VM vm2 = null;
+  private static volatile boolean toCnt = true;
+  private static volatile Thread createQueuesThread;
 
-  protected static Cache cache = null;
+  private static Cache cache = null;
+  private static HARegionQueue hrq = null;
+  private static Thread[] opThreads;
 
-  protected static HARegionQueue hrq = null;
-
-//  private static int counter = 0;
-
-  protected static volatile boolean toCnt = true;
-
-  protected static Thread opThreads[];
-  
-  protected static volatile Thread createQueuesThread;
-
-  /** constructor */
-  public HARegionQueueDUnitTest(String name) {
-    super(name);
-  }
+  private VM vm0 = null;
+  private VM vm1 = null;
+  private VM vm3 = null;
+  private VM vm2 = null;
 
   /**
    * get the VM's
@@ -100,13 +88,16 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
     vm1.invoke(() -> HARegionQueueDUnitTest.closeCache());
     vm2.invoke(() -> HARegionQueueDUnitTest.closeCache());
     vm3.invoke(() -> HARegionQueueDUnitTest.closeCache());
+    
+    cache = null;
+    hrq = null;
+    opThreads = null;
   }
 
   /**
    * create cache
    */
-  protected Cache createCache() throws CacheException
-  {
+  private Cache createCache() throws CacheException {
     Properties props = new Properties();
     DistributedSystem ds = getSystem(props);
     ds.disconnect();
@@ -125,10 +116,9 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
    * assert that the put has not propagated from VM1 to VM2 4) do a put in VM2
    * 5) assert that the value in VM1 has not changed due to put in VM2 6)
    * assert put in VM2 was successful by doing a get
-   *
    */
-  public void testLocalPut()
-  {
+  @Test
+  public void testLocalPut() throws Exception {
     vm0.invoke(() -> HARegionQueueDUnitTest.createRegion());
     vm1.invoke(() -> HARegionQueueDUnitTest.createRegion());
     vm0.invoke(() -> HARegionQueueDUnitTest.putValue1());
@@ -146,10 +136,9 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
    * assert respective puts the VMs were successful by doing a get 7)
    * localDestroy key in VM1 8) assert key has been destroyed in VM1 9) assert
    * key has not been destroyed in VM2
-   *
    */
-  public void testLocalDestroy()
-  {
+  @Test
+  public void testLocalDestroy() throws Exception {
     vm0.invoke(() -> HARegionQueueDUnitTest.createRegion());
     vm1.invoke(() -> HARegionQueueDUnitTest.createRegion());
     vm0.invoke(() -> HARegionQueueDUnitTest.putValue1());
@@ -167,10 +156,9 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
    * value in VM1 to assert put has happened successfully 4) Create mirrored
    * HARegion region1 in VM2 5) do a get in VM2 to verify that value was got
    * through GII 6) do a put in VM2 7) assert put in VM2 was successful
-   *
    */
-  public void testGII()
-  {
+  @Test
+  public void testGII() throws Exception {
     vm0.invoke(() -> HARegionQueueDUnitTest.createRegion());
     vm0.invoke(() -> HARegionQueueDUnitTest.putValue1());
     vm0.invoke(() -> HARegionQueueDUnitTest.getValue1());
@@ -178,37 +166,16 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
     vm1.invoke(() -> HARegionQueueDUnitTest.getValue1());
     vm1.invoke(() -> HARegionQueueDUnitTest.putValue2());
     vm1.invoke(() -> HARegionQueueDUnitTest.getValue2());
-
   }
 
   /**
-   * Tests the relevant data structures are updated after GII happens.
-   *
-   * In this test, a HARegion is created in vm0. 10 conflatable objects are put
-   * in vm0's region HARegion is then created in vm1. After region creation, the
-   * verification whether the relevant data structuers have been updated is
-   * done.
-   *
-   */
- /* public void testGIIAndMapUpdates()
-  {
-    vm0.invoke(() -> HARegionQueueDUnitTest.createRegionQueue2());
-    vm0.invoke(() -> HARegionQueueDUnitTest.putConflatables());
-    vm1.invoke(() -> HARegionQueueDUnitTest.createRegionQueue2());
-    vm0.invoke(() -> HARegionQueueDUnitTest.clearRegion());
-    vm1.invoke(() -> HARegionQueueDUnitTest.verifyMapsAndData());
-
-  } */
-
-  /**
   * 1) Create mirrored HARegion region1 in VM1 2) do a put in VM1 3) get the
    * value in VM1 to assert put has happened successfully 4) Create mirrored
    * HARegion region1 in VM2 5) do a get in VM2 to verify that value was got
    * through GII 6) do a put in VM2 7) assert put in VM2 was successful
-   *
    */
-  public void testQRM()
-  {
+  @Test
+  public void testQRM() throws Exception {
     vm0.invoke(() -> HARegionQueueDUnitTest.createRegionQueue());
     vm1.invoke(() -> HARegionQueueDUnitTest.createRegionQueue());
     vm0.invoke(() -> HARegionQueueDUnitTest.verifyAddingDispatchMesgs());
@@ -217,29 +184,18 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
   }
 
   /**
-   * 1)Create regionqueue on VM0 and VM1 2) put same conflated object from VM1
-   * aand VM2 3)perform take() operation from VM0 4) Wait for the QRM to
-   * execute. 4)check the size of the regionqueue in VM1. It should be zero
-   * because QRM should remove entry from the regionqueue of VM1
-   * 
-   * 
-   */
-  
-  /**
   * Behaviour of take() has been changed for the reliable messaging feature. Region queue take()
    * operation will no longer add to the Dispatch Message Map. Hence disabling the test - SUYOG
    *
    * Test for #35988 HARegionQueue.take() is not functioning as expected
    */
-  @Ignore("TODO")
+  @Ignore("TODO: this test was disabled")
   @Test
   public void testBugNo35988() throws Exception {
-    
-    CacheSerializableRunnable createQueue = new CacheSerializableRunnable(
-        "CreateCache, HARegionQueue and start thread") {
-      public void run2() throws CacheException
-      {
-        HARegionQueueDUnitTest test = new HARegionQueueDUnitTest("region1");
+    CacheSerializableRunnable createQueue = new CacheSerializableRunnable("CreateCache, HARegionQueue and start thread") {
+      @Override
+      public void run2() throws CacheException {
+        HARegionQueueDUnitTest test = new HARegionQueueDUnitTest();
        // TODO:ASIF: Because the QRM thread cannot take a frequency below
        // 1 second, we need to carefully evaluate what to do. Though
        // in this case 1 second instead of 500 ms will work
@@ -256,8 +212,7 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
               new byte[] { 0 }, 1, 1), false, "dummy"));
         }
         catch (Exception e) {
-          throw new CacheException(e) {
-          };
+          throw new AssertionError(e);
         }
       }
     };
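
This hunk shows the pattern behind the commit subject: rather than throwing an anonymous CacheException subclass (or JUnit 3's AssertionFailedError), an unexpected exception is rethrown as java.lang.AssertionError with the original exception as its cause, so the test fails and the full stack trace is preserved. A small standalone sketch; the riskyOperation method is hypothetical:

import org.junit.Test;

public class AssertionErrorWrappingExampleTest {

  // Hypothetical operation that declares a checked exception.
  private void riskyOperation() throws Exception {
    // pretend this touches a cache or region queue and may fail
  }

  @Test
  public void wrapsCheckedExceptionsAsTestFailures() {
    try {
      riskyOperation();
    } catch (Exception e) {
      // AssertionError(Object) records e as the cause, so the original
      // stack trace appears in the failure report.
      throw new AssertionError(e);
    }
  }
}
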
@@ -265,28 +220,28 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
     vm1.invoke(createQueue);
 
     vm0.invoke(new CacheSerializableRunnable("takeFromVm0") {
+      @Override
       public void run2() throws CacheException {
         try {
           Conflatable obj = (Conflatable)hrq.take();
           assertNotNull(obj);
         }
         catch (Exception e) {
-          throw new CacheException(e) {
-          };
+          throw new AssertionError(e);
         }
       }
     });
 
-
-
     vm1.invoke(new CacheSerializableRunnable("checkInVm1") {
-      public void run2() throws CacheException
-      {
+      @Override
+      public void run2() throws CacheException {
         WaitCriterion ev = new WaitCriterion() {
+          @Override
           public boolean done() {
             Thread.yield(); // TODO is this necessary?
             return hrq.size() == 0;
           }
+          @Override
           public String description() {
             return null;
           }
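
The WaitCriterion blocks here poll a condition (hrq.size() == 0) until it holds or a timeout expires, instead of asserting immediately on asynchronous state. A plain-JDK sketch of that polling idea, independent of the DUnit wait utilities; the helper name and timings are illustrative:

import java.util.concurrent.TimeUnit;
import java.util.function.BooleanSupplier;

// Illustrative polling helper, not the DUnit WaitCriterion API itself.
public final class PollingWaitExample {

  static void awaitTrue(BooleanSupplier condition, long timeoutMs) throws InterruptedException {
    long deadline = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(timeoutMs);
    while (!condition.getAsBoolean()) {
      if (System.nanoTime() > deadline) {
        throw new AssertionError("condition not met within " + timeoutMs + " ms");
      }
      Thread.sleep(200); // short poll interval between checks
    }
  }

  public static void main(String[] args) throws InterruptedException {
    long start = System.currentTimeMillis();
    // Example condition: becomes true after roughly one second.
    awaitTrue(() -> System.currentTimeMillis() - start > 1000, 60_000);
    System.out.println("condition met");
  }
}
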
@@ -299,32 +254,18 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
 
   /**
    * create a client with 2 regions sharing a common writer
-   *
-   * @throws Exception
    */
-
-  public static void createRegion() throws Exception
-  {
-    HARegionQueueDUnitTest test = new HARegionQueueDUnitTest(
-        "HARegionQueueDUnitTest_region");
+  private static void createRegion() throws Exception {
+    HARegionQueueDUnitTest test = new HARegionQueueDUnitTest();
     cache = test.createCache();
     AttributesFactory factory = new AttributesFactory();
     factory.setScope(Scope.DISTRIBUTED_ACK);
     factory.setDataPolicy(DataPolicy.REPLICATE);
-    HARegion.getInstance("HARegionQueueDUnitTest_region", (GemFireCacheImpl)cache,
-        null, factory.create());
+    HARegion.getInstance("HARegionQueueDUnitTest_region", (GemFireCacheImpl)cache, null, factory.create());
   }
 
-  /**
-   *
-   *
-   * @throws Exception
-   */
-
-  public static void createRegionQueue() throws Exception
-  {
-    HARegionQueueDUnitTest test = new HARegionQueueDUnitTest(
-        "HARegionQueueDUnitTest_region");
+  private static void createRegionQueue() throws Exception {
+    HARegionQueueDUnitTest test = new HARegionQueueDUnitTest();
     cache = test.createCache();
     /*
      * AttributesFactory factory = new AttributesFactory();
@@ -342,13 +283,10 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
         "HARegionQueueDUnitTest_region");
     hrq.put(c1);
     hrq.put(c2);
-
   }
 
-  public static void createRegionQueue2() throws Exception
-  {
-    HARegionQueueDUnitTest test = new HARegionQueueDUnitTest(
-        "HARegionQueueDUnitTest_region");
+  private static void createRegionQueue2() throws Exception {
+    HARegionQueueDUnitTest test = new HARegionQueueDUnitTest();
     cache = test.createCache();
     /*
      * AttributesFactory factory = new AttributesFactory();
@@ -362,8 +300,7 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
         HARegionQueue.NON_BLOCKING_HA_QUEUE, false);
   }
 
-  public static void clearRegion()
-  {
+  private static void clearRegion() {
     try {
       Iterator iterator = hrq.getRegion().keys().iterator();
       while (iterator.hasNext()) {
@@ -371,31 +308,31 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
       }
     }
     catch (Exception e) {
-      fail("Exception occured while trying to destroy region");
+      fail("Exception occured while trying to destroy region", e);
     }
 
   }
 
-  public static void verifyAddingDispatchMesgs()
-  {
-    Assert.assertTrue(HARegionQueue.getDispatchedMessagesMapForTesting()
+  private static void verifyAddingDispatchMesgs() {
+    assertTrue(HARegionQueue.getDispatchedMessagesMapForTesting()
         .isEmpty());
     hrq.addDispatchedMessage(new ThreadIdentifier(new byte[1], 1), 1);
-    Assert.assertTrue(!HARegionQueue.getDispatchedMessagesMapForTesting()
+    assertTrue(!HARegionQueue.getDispatchedMessagesMapForTesting()
         .isEmpty());
   }
 
-  public static void verifyDispatchedMessagesRemoved()
+  private static void verifyDispatchedMessagesRemoved()
   {
     try {
       final Region region = hrq.getRegion();
-      // wait until we have a dead
-      // server
+      // wait until we have a dead server
       WaitCriterion ev = new WaitCriterion() {
+        @Override
         public boolean done() {
           Thread.yield(); // TODO is this necessary?
           return region.get(new Long(0)) == null;
         }
+        @Override
         public String description() {
           return null;
         }
@@ -413,16 +350,14 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
 
     }
     catch (Exception e) {
-      fail("test failed due to an exception :  " + e);
+      fail("test failed due to an exception", e);
     }
   }
 
   /**
    * close the cache
-   * 
    */
-  public static void closeCache()
-  {
+  private static void closeCache() {
     if (cache != null && !cache.isClosed()) {
       cache.close();
       cache.getDistributedSystem().disconnect();
@@ -431,22 +366,18 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
 
   /**
    * do puts on key-1
-   *
    */
-  public static void putValue1()
-  {
+  private static void putValue1() {
     try {
       Region r1 = cache.getRegion("/HARegionQueueDUnitTest_region");
       r1.put("key-1", "value-1");
     }
     catch (Exception ex) {
-      ex.printStackTrace();
-      com.gemstone.gemfire.test.dunit.Assert.fail("failed while region.put()", ex);
+      fail("failed while region.put()", ex);
     }
   }
 
-  public static void putConflatables()
-  {
+  private static void putConflatables() {
     try {
       Region r1 = hrq.getRegion();
       for (int i = 1; i < 11; i++) {
@@ -456,54 +387,51 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
       }
     }
     catch (Exception ex) {
-      ex.printStackTrace();
-      com.gemstone.gemfire.test.dunit.Assert.fail("failed while region.put()", ex);
+      fail("failed while region.put()", ex);
     }
   }
 
   /**
    * verifies the data has been populated correctly after GII
-   *
    */
-  public static void verifyMapsAndData()
-  {
+  private static void verifyMapsAndData() {
     try {
       HARegion r1 = (HARegion)hrq.getRegion();
       // region should not be null
-      Assert.assertNotNull(" Did not expect the HARegion to be null but it is",
+      assertNotNull(" Did not expect the HARegion to be null but it is",
           r1);
       // it should have ten non null entries
       for (int i = 1; i < 11; i++) {
-        Assert.assertNotNull(" Did not expect the entry to be null but it is",
+        assertNotNull(" Did not expect the entry to be null but it is",
             r1.get(new Long(i)));
       }
       // HARegionQueue should not be null
-      Assert.assertNotNull(
+      assertNotNull(
           " Did not expect the HARegionQueue to be null but it is", hrq);
 
       Map conflationMap = hrq.getConflationMapForTesting();
       // conflationMap size should be greater than 0
-      Assert.assertTrue(
+      assertTrue(
           " Did not expect the conflationMap size to be 0 but it is",
           conflationMap.size() > 0);
       Map internalMap = (Map)conflationMap.get("HARegionQueueDUnitTest_region");
       // internal map should not be null. it should be present
-      Assert.assertNotNull(
+      assertNotNull(
           " Did not expect the internalMap to be null but it is", internalMap);
       // get and verify the entries in the conflation map.
       for (int i = 1; i < 11; i++) {
-        Assert.assertTrue(
+        assertTrue(
             " Did not expect the entry not to be equal but it is", internalMap
                 .get("key" + i).equals(new Long(i)));
       }
       Map eventMap = hrq.getEventsMapForTesting();
       // DACE should not be null
-      Assert.assertNotNull(
+      assertNotNull(
           " Did not expect the result (DACE object) to be null but it is",
           eventMap.get(new ThreadIdentifier(new byte[] { 1 }, 1)));
       Set counterSet = hrq.getCurrentCounterSet(new EventID(new byte[] { 1 },
           1, 1));
-      Assert.assertTrue(
+      assertTrue(
           " excpected the counter set size to be 10 but it is not so",
           counterSet.size() == 10);
       long i = 1;
@@ -511,12 +439,12 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
       // verify the order of the iteration. it should be 1 - 10. The underlying
       // set is a LinkedHashSet
       while (iterator.hasNext()) {
-        Assert.assertTrue(((Long)iterator.next()).longValue() == i);
+        assertTrue(((Long)iterator.next()).longValue() == i);
         i++;
       }
      // The last dispatched sequence Id should be -1 since no dispatch has
       // been made
-      Assert.assertTrue(hrq.getLastDispatchedSequenceId(new EventID(
+      assertTrue(hrq.getLastDispatchedSequenceId(new EventID(
           new byte[] { 1 }, 1, 1)) == -1);
 
      // sleep for 8.0 seconds. Everything should expire and everything should
@@ -524,8 +452,7 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
       Thread.sleep(7500);
 
       for (int j = 1; j < 11; j++) {
-        Assert
-            .assertNull(
+        assertNull(
                 "expected the entry to be null since expiry time exceeded but it is not so",
                 r1.get(new Long(j)));
       }
@@ -533,50 +460,41 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
       internalMap = (Map)hrq.getConflationMapForTesting().get(
           "HARegionQueueDUnitTest_region");
 
-      Assert.assertNotNull(
+      assertNotNull(
           " Did not expect the internalMap to be null but it is", internalMap);
-      Assert
-          .assertTrue(
+      assertTrue(
               "internalMap (conflation) should have been emptry since expiry of all entries has been exceeded but it is not so",
               internalMap.isEmpty());
-      Assert
-          .assertTrue(
+      assertTrue(
               "eventMap should have been emptry since expiry of all entries has been exceeded but it is not so",
               eventMap.isEmpty());
-      Assert
-          .assertTrue(
+      assertTrue(
               "counter set should have been emptry since expiry of all entries has been exceeded but it is not so",
               counterSet.isEmpty());
 
     }
     catch (Exception ex) {
-      ex.printStackTrace();
-      com.gemstone.gemfire.test.dunit.Assert.fail("failed while region.put()", ex);
+      fail("failed while region.put()", ex);
     }
   }
 
   /**
    * do puts on key-1,value-2
-   *
    */
-  public static void putValue2()
-  {
+  private static void putValue2() {
     try {
       Region r1 = cache.getRegion("/HARegionQueueDUnitTest_region");
       r1.put("key-1", "value-2");
     }
     catch (Exception ex) {
-      ex.printStackTrace();
-      com.gemstone.gemfire.test.dunit.Assert.fail("failed while region.put()", ex);
+      fail("failed while region.put()", ex);
     }
   }
 
   /**
    * do a get on region1
-   *
    */
-  public static void getValue1()
-  {
+  private static void getValue1() {
     try {
       Region r = cache.getRegion("/HARegionQueueDUnitTest_region");
       if (!(r.get("key-1").equals("value-1"))) {
@@ -585,17 +503,14 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
 
     }
     catch (Exception ex) {
-      ex.printStackTrace();
-      com.gemstone.gemfire.test.dunit.Assert.fail("failed while region.get()", ex);
+      fail("failed while region.get()", ex);
     }
   }
 
   /**
    * do a get on region1
-   *
    */
-  public static void getNull()
-  {
+  private static void getNull() {
     try {
       Region r = cache.getRegion("/HARegionQueueDUnitTest_region");
       if (!(r.get("key-1") == (null))) {
@@ -604,17 +519,14 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
 
     }
     catch (Exception ex) {
-      ex.printStackTrace();
-      com.gemstone.gemfire.test.dunit.Assert.fail("failed while region.get()", ex);
+      fail("failed while region.get()", ex);
     }
   }
 
   /**
    * do a get on region1
-   *
    */
-  public static void getValue2()
-  {
+  public static void getValue2() {
     try {
       Region r = cache.getRegion("/HARegionQueueDUnitTest_region");
       if (!(r.get("key-1").equals("value-2"))) {
@@ -623,24 +535,20 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
 
     }
     catch (Exception ex) {
-      ex.printStackTrace();
-      com.gemstone.gemfire.test.dunit.Assert.fail("failed while region.get()", ex);
+      fail("failed while region.get()", ex);
     }
   }
 
   /**
    * destroy key-1
-   *
    */
-  public static void destroy()
-  {
+  public static void destroy() {
     try {
       Region region1 = cache.getRegion("/HARegionQueueDUnitTest_region");
       region1.localDestroy("key-1");
     }
     catch (Exception e) {
-      e.printStackTrace();
-      fail("test failed due to exception in destroy ");
+      fail("test failed due to exception in destroy", e);
     }
   }
 
@@ -649,11 +557,9 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
   * peek, batch peek operations in multiple regions. The test will have
   * take/remove occurring in all the VMs. This test is targeted to test for
   * hang or exceptions in the non-blocking queue.
-   *
-   *
    */
-  public void testConcurrentOperationsDunitTestOnNonBlockingQueue()
-  {
+  @Test
+  public void testConcurrentOperationsDunitTestOnNonBlockingQueue() throws Exception {
     concurrentOperationsDunitTest(false, Scope.DISTRIBUTED_ACK);
   }
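
These concurrent-operation tests coordinate their worker threads through a shared volatile flag (toCnt): the op threads keep operating while the flag is true, and the later toggleFlag runnable flips it so every thread can exit before the join step. A compact standalone sketch of that shutdown-flag pattern, with illustrative names:

// Illustrative stop-flag pattern, similar in spirit to the toCnt flag used by these tests.
public class StopFlagExample {

  // volatile so the change made by the toggling thread is visible to the workers
  private static volatile boolean keepRunning = true;

  public static void main(String[] args) throws InterruptedException {
    Thread[] workers = new Thread[4];
    for (int i = 0; i < workers.length; i++) {
      workers[i] = new Thread(() -> {
        while (keepRunning) {
          // do one unit of work (put/take/peek in the real test)
          Thread.yield();
        }
      }, "worker-" + i);
      workers[i].start();
    }

    Thread.sleep(500);   // let the workers run for a while
    keepRunning = false; // signal shutdown, analogous to toggleFlag setting toCnt = false

    for (Thread worker : workers) {
      worker.join();     // analogous to the joinWithThreads step
    }
  }
}
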
 
@@ -662,11 +568,9 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
   * peek, batch peek operations in multiple regions. The test will have
   * take/remove occurring in all the VMs. This test is targeted to test for
   * hang or exceptions in the non-blocking queue.
-   *
-   *
    */
-  public void testConcurrentOperationsDunitTestOnNonBlockingQueueWithDNoAckRegion()
-  {
+  @Test
+  public void testConcurrentOperationsDunitTestOnNonBlockingQueueWithDNoAckRegion() throws Exception {
     concurrentOperationsDunitTest(false, Scope.DISTRIBUTED_NO_ACK);
   }
 
@@ -675,25 +579,19 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
   * peek, batch peek operations in multiple regions. The test will have
   * take/remove occurring in all the VMs. This test is targeted to test for
    * hang or exceptions in blocking queue.
-   *
-   *
    */
-  public void testConcurrentOperationsDunitTestOnBlockingQueue()
-  {
+  @Test
+  public void testConcurrentOperationsDunitTestOnBlockingQueue() throws Exception {
     concurrentOperationsDunitTest(true, Scope.DISTRIBUTED_ACK);
   }
 
-  private void concurrentOperationsDunitTest(
-      final boolean createBlockingQueue, final Scope rscope)
-  {
+  private void concurrentOperationsDunitTest(final boolean createBlockingQueue, final Scope rscope) {
     // Create Cache and HARegionQueue in all the 4 VMs.
 
-    CacheSerializableRunnable createRgnsAndQueues = new CacheSerializableRunnable(
-        "CreateCache, mirrored Region & HARegionQueue with a CacheListener") {
-      public void run2() throws CacheException
-      {
-        HARegionQueueDUnitTest test = new HARegionQueueDUnitTest(
-            "HARegionQueueDUnitTest_region");
+    CacheSerializableRunnable createRgnsAndQueues = new CacheSerializableRunnable("CreateCache, mirrored Region & HARegionQueue with a CacheListener") {
+      @Override
+      public void run2() throws CacheException {
+        HARegionQueueDUnitTest test = new HARegionQueueDUnitTest();
         System.getProperties()
             .put("QueueRemovalThreadWaitTime", "2000");
         cache = test.createCache();
@@ -713,12 +611,11 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
           }
         }
         catch (Exception e) {
-          throw new CacheException(e) {
-          };
+          throw new AssertionError(e);
         }
         factory.addCacheListener(new CacheListenerAdapter() {
-          public void afterCreate(final EntryEvent event)
-          {
+          @Override
+          public void afterCreate(final EntryEvent event) {
             Conflatable conflatable = new ConflatableObject(event.getKey(),
                 event.getNewValue(), ((EntryEventImpl)event).getEventId(),
                 false, event.getRegion().getFullPath());
@@ -727,14 +624,12 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
               hrq.put(conflatable);
             }
             catch (Exception e) {
-              e.printStackTrace();
-              fail("The put operation in queue did not succeed due to exception ="
-                  + e);
+              fail("The put operation in queue did not succeed due to exception", e);
             }
           }
 
-          public void afterUpdate(final EntryEvent event)
-          {
+          @Override
+          public void afterUpdate(final EntryEvent event) {
             Conflatable conflatable = new ConflatableObject(event.getKey(),
                 event.getNewValue(), ((EntryEventImpl)event).getEventId(),
                 true, event.getRegion().getFullPath());
@@ -743,9 +638,7 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
               hrq.put(conflatable);
             }
             catch (Exception e) {
-              e.printStackTrace();
-              fail("The put operation in queue did not succeed due to exception ="
-                  + e);
+              fail("The put operation in queue did not succeed due to exception", e);
             }
           }
 
@@ -760,11 +653,10 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
     vm1.invoke(createRgnsAndQueues);
     vm2.invoke(createRgnsAndQueues);
     vm3.invoke(createRgnsAndQueues);
-    CacheSerializableRunnable spawnThreadsAndperformOps = new CacheSerializableRunnable(
-        "Spawn multipe threads which do various operations") {
+    CacheSerializableRunnable spawnThreadsAndperformOps = new CacheSerializableRunnable("Spawn multiple threads which do various operations") {
 
-      public void run2() throws CacheException
-      {
+      @Override
+      public void run2() throws CacheException {
         opThreads = new Thread[4 + 2 + 2 + 2];
         for (int i = 0; i < 4; ++i) {
           opThreads[i] = new Thread(new RunOp(RunOp.PUT, i), "ID="
@@ -801,13 +693,14 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
       Thread.sleep(2000);
     }
     catch (InterruptedException e1) {
-      fail("Test failed as the test thread encoutered exception in sleep");
+      fail("Test failed as the test thread encoutered exception in sleep", e1);
     }
+
    // Asif: In case of a blocking HARegionQueue do some extra puts so that the
    // blocking threads are exited
-    CacheSerializableRunnable toggleFlag = new CacheSerializableRunnable(
-        "Toggle the flag to signal end of threads") {
+    CacheSerializableRunnable toggleFlag = new CacheSerializableRunnable("Toggle the flag to signal end of threads") {
+      @Override
       public void run2() throws CacheException {
         toCnt = false;
         if (createBlockingQueue) {
@@ -818,8 +711,7 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
             }
           }
           catch (Exception e) {
-            throw new CacheException(e) {
-            };
+            throw new AssertionError(e);
           }
         }
 
@@ -830,16 +722,10 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
     vm1.invokeAsync(toggleFlag);
     vm2.invokeAsync(toggleFlag);
     vm3.invokeAsync(toggleFlag);
-//     try {
-//       Thread.sleep(5000);
-//     }
-//     catch (InterruptedException e2) {
-//       fail("Test failed as the test thread encoutered exception in sleep");
-//     }
-    CacheSerializableRunnable joinWithThreads = new CacheSerializableRunnable(
-        "Join with the threads") {
-      public void run2() throws CacheException
-      {
+
+    CacheSerializableRunnable joinWithThreads = new CacheSerializableRunnable("Join with the threads") {
+      @Override
+      public void run2() throws CacheException {
         for (int i = 0; i < opThreads.length; ++i) {
 
           if (opThreads[i].isInterrupted()) {
@@ -861,20 +747,21 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
   * This is to test the bug which is caused when the HARegionQueue object has not
   * been fully constructed but, as the HARegion has been constructed, it becomes
   * visible to the QRM Message Thread.
+   *
+   * TODO: this test runs too long! Shorten run time. 1m 40s on new Mac.
    */
   @Category(FlakyTest.class) // GEODE-690: async queuing, time sensitive, expiration, waitForCriterion, joins
-  public void testNPEDueToHARegionQueueEscapeInConstructor()
-  {
+  @Test
+  public void testNPEDueToHARegionQueueEscapeInConstructor() {
     // changing EXPIRY_TIME to 5 doesn't change how long the test runs!
     final int EXPIRY_TIME = 30; // test will run for this many seconds
     // Create two HARegionQueues in the two VMs. The frequency of the QRM thread
     // should be high.
     // Check for NullPointerException in the other VM.
-    CacheSerializableRunnable createQueuesAndThread = new CacheSerializableRunnable(
-        "CreateCache, HARegionQueue and start thread") {
-      public void run2() throws CacheException
-      {
-        HARegionQueueDUnitTest test = new HARegionQueueDUnitTest("region1");
+    CacheSerializableRunnable createQueuesAndThread = new CacheSerializableRunnable("CreateCache, HARegionQueue and start thread") {
+      @Override
+      public void run2() throws CacheException {
+        HARegionQueueDUnitTest test = new HARegionQueueDUnitTest();
        // TODO:ASIF: Because the QRM thread cannot take a frequency below
        // 1 second, we need to carefully evaluate what to do.
        // For this bug to appear, without the bugfix, QRM needs to run
@@ -897,8 +784,8 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
           }
           opThreads = new Thread[1];
           opThreads[0] = new Thread(new Runnable() {
-            public void run()
-            {
+            @Override
+            public void run() {
               for (int i = 0; i < OP_COUNT; ++i) {
                 try {
                   Object o = hrq.take();
@@ -907,7 +794,7 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
                   }
                 }
                 catch (InterruptedException e) {
-                  fail("interrupted");
+                  throw new AssertionError(e);
                 }
               }
             }
@@ -916,18 +803,16 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
 
         }
         catch (Exception e) {
-          throw new CacheException(e) {
-          };
+          throw new AssertionError(e);
         }
       }
     };
 
-    CacheSerializableRunnable createQueues = new CacheSerializableRunnable(
-        "CreateCache, HARegionQueue ") {
-      public void run2() throws CacheException
-      {
+    CacheSerializableRunnable createQueues = new CacheSerializableRunnable("CreateCache, HARegionQueue ") {
+      @Override
+      public void run2() throws CacheException {
         createQueuesThread = Thread.currentThread();
-        HARegionQueueDUnitTest test = new HARegionQueueDUnitTest("region1");
+        HARegionQueueDUnitTest test = new HARegionQueueDUnitTest();
         //System.getProperties().put("QueueRemovalThreadWaitTime",
          //   new Long(120000));
         cache = test.createCache();
@@ -940,19 +825,20 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
               HARegionQueue.NON_BLOCKING_HA_QUEUE, false);
         }
         catch (Exception e) {
-          throw new CacheException(e) {
-          };
+          throw new AssertionError(e);
         }
       }
     };
     
-    CacheSerializableRunnable waitForCreateQueuesThread = new CacheSerializableRunnable(
-        "joinCreateCache") {
+    CacheSerializableRunnable waitForCreateQueuesThread = new CacheSerializableRunnable("joinCreateCache") {
+      @Override
       public void run2() {
         WaitCriterion ev = new WaitCriterion() {
+          @Override
           public boolean done() {
             return createQueuesThread != null;
           }
+          @Override
           public String description() {
             return null;
           }
@@ -965,10 +851,9 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
     vm0.invoke(createQueuesAndThread);
     vm1.invokeAsync(createQueues);
 
-    CacheSerializableRunnable joinWithThread = new CacheSerializableRunnable(
-        "CreateCache, HARegionQueue join with thread") {
-      public void run2() throws CacheException
-      {
+    CacheSerializableRunnable joinWithThread = new CacheSerializableRunnable("CreateCache, HARegionQueue join with thread") {
+      @Override
+      public void run2() throws CacheException {
         if (opThreads[0].isInterrupted()) {
           fail("The test has failed as it encountered interrupts in puts & takes");
         }
@@ -979,28 +864,23 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
     vm1.invoke(waitForCreateQueuesThread);
   }
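
The test ending above (testNPEDueToHARegionQueueEscapeInConstructor) exercises a construction-time race: the HARegion becomes visible to the QRM thread before the enclosing HARegionQueue finishes constructing, so that thread can observe partially initialized state. A generic, hypothetical illustration of that "escape during construction" hazard, not Geode code:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

// Hypothetical illustration of publishing an object before its constructor finishes.
public class ConstructionEscapeExample {

  // A registry another thread polls, standing in for the region becoming visible to QRM.
  static final ConcurrentMap<String, ConstructionEscapeExample> REGISTRY = new ConcurrentHashMap<>();

  private final Object importantState;

  ConstructionEscapeExample(String name) {
    REGISTRY.put(name, this);           // BUG: 'this' escapes before construction completes
    slowInitialization();
    this.importantState = new Object(); // another thread may still see null here
  }

  private static void slowInitialization() {
    try {
      Thread.sleep(100);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
    }
  }

  Object getImportantState() {
    return importantState;
  }

  public static void main(String[] args) {
    new Thread(() -> {
      ConstructionEscapeExample observed;
      while ((observed = REGISTRY.get("queue")) == null) {
        Thread.yield();
      }
      // May print null because the instance escaped before being fully constructed.
      System.out.println("observed state: " + observed.getImportantState());
    }).start();

    new ConstructionEscapeExample("queue");
  }
}
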
 
-  class RunOp implements Runnable
-  {
+  private static class RunOp implements Runnable {
 
-    int opType;
+    private static final int PUT = 1;
+    private static final int TAKE = 2;
+    private static final int PEEK = 3;
+    private static final int BATCH_PEEK = 4;
 
-    int threadID;
-
-    public static final int PUT = 1;
-
-    public static final int TAKE = 2;
-
-    public static final int PEEK = 3;
-
-    public static final int BATCH_PEEK = 4;
+    private int opType;
+    private int threadID;
 
     public RunOp(int opType, int id) {
       this.opType = opType;
       this.threadID = id;
     }
 
-    public void run()
-    {
+    @Override
+    public void run() {
       Region rgn = cache.getRegion("test_region");
       int counter = 0;
       LogWriter logger = cache.getLogger();
@@ -1052,35 +932,17 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
         }
       }
       catch (Exception e) {
-        Thread.currentThread().interrupt();
+        throw new AssertionError(e);
       }
     }
   }
 
   /**
-   * This is to test the bug which is caused when HARegionQueue object hasnot
-   * been fully constructed but as the HARegion has got constructed , it gets
-   * visible to expiry thread task causing NullPointerException in some
-   * situations.
-   *
-   */
- /* public void testBugNo35989()
-  {
-    vm0.invoke(() -> HARegionQueueDUnitTest.createRegionQueue());
-    vm1.invoke(() -> HARegionQueueDUnitTest.createHARegionQueueandCheckExpiration());
-
-  } */
-
-  /**
   * Checks that the data received by GII only gets expired after proper
   * construction of the HARegionQueue object.
-   *
-   * @throws Exception
    */
-  public static void createHARegionQueueandCheckExpiration() throws Exception
-  {
-    HARegionQueueDUnitTest test = new HARegionQueueDUnitTest(
-        "HARegionQueueDUnitTest_region");
+  private static void createHARegionQueueandCheckExpiration() throws Exception {
+    HARegionQueueDUnitTest test = new HARegionQueueDUnitTest();
     cache = test.createCache();
     HARegionQueueAttributes attrs = new HARegionQueueAttributes();
     attrs.setExpiryTime(1);
@@ -1090,9 +952,11 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
    // wait until we have a dead server
     WaitCriterion ev = new WaitCriterion() {
+      @Override
       public boolean done() {
         return hrq.getAvalaibleIds().size() == 0;
       }
+      @Override
       public String description() {
         return null;
       }
@@ -1101,21 +965,17 @@ public class HARegionQueueDUnitTest extends DistributedTestCase {
     // assertIndexDetailsEquals(0, hrq.getAvalaibleIds().size());
   }
 
-  public void testForDuplicateEvents()
-  {
+  @Test
+  public void testForDuplicateEvents() throws Exception {
     vm0.invoke(() -> HARegionQueueDUnitTest.createRegionQueue());
     vm1.invoke(() -> HARegionQueueDUnitTest.createRegionQueueandCheckDuplicates());
   }
 
   /**
    *  HARegionQueue should not allow data with duplicate EventIds.
-   *
-   * @throws Exception
    */
-  public static void createRegionQueueandCheckDuplicates() throws Exception
-  {
-    HARegionQueueDUnitTest test = new HARegionQueueDUnitTest(
-        "HARegionQueueDUnitTest_region");
+  private static void createRegionQueueandCheckDuplicates() throws Exception {
+    HARegionQueueDUnitTest test = new HARegionQueueDUnitTest();
     cache = test.createCache();
 
     hrq = HARegionQueue.getHARegionQueueInstance("HARegionQueueDUnitTest_region", cache,


