geode-commits mailing list archives

From bschucha...@apache.org
Subject [02/14] incubator-geode git commit: jgroups source is now removed from asf branch GEODE-77 and is a downloaded dependency. Able to start/stop a locator and a server via gfsh but there is no HA or authentication in the membership system.
Date Mon, 27 Jul 2015 20:27:46 GMT
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/d2a942e8/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/LocatorDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/LocatorDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/LocatorDUnitTest.java
index 0bcda0b..7f1a922 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/LocatorDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/LocatorDUnitTest.java
@@ -27,24 +27,16 @@ import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.distributed.internal.DistributionException;
 import com.gemstone.gemfire.distributed.internal.DistributionManager;
 import com.gemstone.gemfire.distributed.internal.InternalDistributedSystem;
-import com.gemstone.gemfire.distributed.internal.InternalLocator;
 import com.gemstone.gemfire.distributed.internal.MembershipListener;
 import com.gemstone.gemfire.distributed.internal.membership.InternalDistributedMember;
+import com.gemstone.gemfire.distributed.internal.membership.MembershipManager;
 import com.gemstone.gemfire.distributed.internal.membership.MembershipTestHook;
 import com.gemstone.gemfire.distributed.internal.membership.NetView;
-import com.gemstone.gemfire.distributed.internal.membership.jgroup.JGroupMembershipManager;
-import com.gemstone.gemfire.distributed.internal.membership.jgroup.MembershipManagerHelper;
-import com.gemstone.gemfire.internal.Assert;
+import com.gemstone.gemfire.distributed.internal.membership.gms.MembershipManagerHelper;
 import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.internal.AvailablePortHelper;
 import com.gemstone.gemfire.internal.logging.InternalLogWriter;
 import com.gemstone.gemfire.internal.logging.LocalLogWriter;
-import com.gemstone.org.jgroups.Event;
-import com.gemstone.org.jgroups.JChannel;
-import com.gemstone.org.jgroups.protocols.FD_SOCK;
-import com.gemstone.org.jgroups.protocols.PingWaiter;
-import com.gemstone.org.jgroups.protocols.pbcast.GMS;
-import com.gemstone.org.jgroups.stack.Protocol;
 
 import dunit.AsyncInvocation;
 import dunit.DistributedTestCase;
@@ -268,8 +260,8 @@ public class LocatorDUnitTest extends DistributedTestCase {
 //        boolean setting = GemFireTracer.DEBUG;
         try {
           System.setProperty("p2p.joinTimeout", "5000"); // set a short join timeout.  default is 17000ms
-//          GemFireTracer.DEBUG = true;
-          PingWaiter.TEST_HOOK_IGNORE_REQUIRED_RESPONSE = true;
+          fail("ignore required response must be implemented for the jgroups replacement");
+//          PingWaiter.TEST_HOOK_IGNORE_REQUIRED_RESPONSE = true;
           Locator myLocator = Locator.startLocatorAndDS(port1, new File("testBug30341Locator1.log"), properties);
           myLocator.stop();
         } catch (SystemConnectException e) {
@@ -277,8 +269,7 @@ public class LocatorDUnitTest extends DistributedTestCase {
         } catch (GemFireConfigException e) {
           return Boolean.TRUE;
         } finally {
-//          GemFireTracer.DEBUG = setting;
-          PingWaiter.TEST_HOOK_IGNORE_REQUIRED_RESPONSE = false;
+//          PingWaiter.TEST_HOOK_IGNORE_REQUIRED_RESPONSE = false;
           System.getProperties().remove("p2p.joinTimeout");
         }
         return Boolean.FALSE;
@@ -521,23 +512,8 @@ public class LocatorDUnitTest extends DistributedTestCase {
           public void run() {
             Locator loc = Locator.getLocators().iterator().next();
             DistributedSystem msys = loc.getDistributedSystem();
-            MembershipManagerHelper.inhibitForcedDisconnectLogging(true);
-            MembershipManagerHelper.playDead(msys);
-            JChannel c = MembershipManagerHelper.getJChannel(msys);
-            Protocol udp = c.getProtocolStack().findProtocol("UDP");
-            udp.stop();
-            udp.passUp(new Event(Event.EXIT, new Exception("killing locator's ds")));
-            try {
-              MembershipManagerHelper.getJChannel(msys).waitForClose();
-            }
-            catch (InterruptedException ie) {
-              Thread.currentThread().interrupt();
-              // attempt rest of work with interrupt bit set
-            }
+            MembershipManagerHelper.crashDistributedSystem(msys);
             loc.stop();
-//            LogWriter bLogger =
-//              new LocalLogWriter(InternalLogWriter.ALL_LEVEL, System.out);
-            MembershipManagerHelper.inhibitForcedDisconnectLogging(false);
           }
       };
 
@@ -664,22 +640,7 @@ public class LocatorDUnitTest extends DistributedTestCase {
           public void run() {
             Locator loc = Locator.getLocators().iterator().next();
             DistributedSystem msys = loc.getDistributedSystem();
-            MembershipManagerHelper.inhibitForcedDisconnectLogging(true);
-            MembershipManagerHelper.playDead(msys);
-            JChannel c = MembershipManagerHelper.getJChannel(msys);
-            Protocol udp = c.getProtocolStack().findProtocol("UDP");
-            udp.stop();
-            udp.passUp(new Event(Event.EXIT, new Exception("killing locators ds")));
-            try {
-              MembershipManagerHelper.getJChannel(msys).waitForClose();
-            }
-            catch (InterruptedException ie) {
-              Thread.currentThread().interrupt();
-              // attempt rest of work with interrupt bit set
-            }
-//            loc.stop();
-//            LogWriter bLogger =
-//              new LocalLogWriter(InternalLogWriter.ALL_LEVEL, System.out);
+            loc.stop();
           }
       };
 
@@ -709,7 +670,8 @@ public class LocatorDUnitTest extends DistributedTestCase {
       try {
         // a new view won't be installed for 10 seconds (5*member-timeout of 2000)
         // so we'll detect loss of two members in one view
-        GMS.TEST_HOOK_SLOW_VIEW_CASTING=10;
+        fail("slow view casting must be implemented for the jgroups replacement");
+//        GMS.TEST_HOOK_SLOW_VIEW_CASTING=10;
 
         // disconnect the first vm and locator to demonstrate that the third vm and the
         // locator notice the failure and notify of quorum loss
@@ -831,16 +793,7 @@ public class LocatorDUnitTest extends DistributedTestCase {
             msys.getLogWriter().info("<ExpectedException action=add>service failure</ExpectedException>");
             msys.getLogWriter().info("<ExpectedException action=add>com.gemstone.gemfire.ConnectException</ExpectedException>");
             msys.getLogWriter().info("<ExpectedException action=add>com.gemstone.gemfire.ForcedDisconnectException</ExpectedException>");
-            MembershipManagerHelper.playDead(msys);
-            JChannel c = MembershipManagerHelper.getJChannel(msys);
-            Protocol udp = c.getProtocolStack().findProtocol("UDP");
-            udp.stop();
-            udp.passUp(new Event(Event.EXIT, new Exception("killing members ds")));
-            try {
-              MembershipManagerHelper.getJChannel(msys).waitForClose();
-            } catch (InterruptedException ie) {
-              Thread.currentThread().interrupt();
-            }
+            MembershipManagerHelper.crashDistributedSystem(msys);
           }
       };
 
@@ -988,28 +941,7 @@ public class LocatorDUnitTest extends DistributedTestCase {
             msys.getLogWriter().info("<ExpectedException action=add>com.gemstone.gemfire.ConnectException</ExpectedException>");
             msys.getLogWriter().info("<ExpectedException action=add>com.gemstone.gemfire.ForcedDisconnectException</ExpectedException>");
             msys.getLogWriter().info("<ExpectedException action=add>Possible loss of quorum</ExpectedException>");
-
-            JChannel c = MembershipManagerHelper.getJChannel(msys);
-            JChannelTestHook hook = new JChannelTestHook();
-            MembershipManagerHelper.addTestHook(msys, hook);
-            try {
-              MembershipManagerHelper.playDead(msys);
-              Protocol udp = c.getProtocolStack().findProtocol("UDP");
-              udp.stop();
-              udp.passUp(new Event(Event.EXIT, new ForcedDisconnectException("killing members ds")));
-              
-              c.setClosed(true);
-              try {
-                MembershipManagerHelper.getJChannel(msys).waitForClose();
-              }
-              catch (InterruptedException ie) {
-                Thread.currentThread().interrupt();
-                // attempt rest of work with interrupt bit set
-              }
-            } finally {
-              MembershipManagerHelper.removeTestHook(msys, hook);
-              hook.reset();
-            }
+            MembershipManagerHelper.crashDistributedSystem(msys);
           }
       };
 
@@ -1047,16 +979,14 @@ public class LocatorDUnitTest extends DistributedTestCase {
         @Override
         public void run() {
           DistributedSystem msys = InternalDistributedSystem.getAnyInstance();
-          JGroupMembershipManager jmm = MembershipManagerHelper.getMembershipManager(msys);
-          
-          JChannel c = MembershipManagerHelper.getJChannel(msys);
+          MembershipManager mmgr = MembershipManagerHelper.getMembershipManager(msys);
           
-          // check for shutdown cause in JGroupsMembershipManager. Following call should
+          // check for shutdown cause in MembershipManager. Following call should
           // throw DistributedSystemDisconnectedException which should have cause as
           // ForceDisconnectException.
           try {
             msys.getLogWriter().info("<ExpectedException action=add>Membership: requesting removal of </ExpectedException>");
-            jmm.requestMemberRemoval(mem1, "test reasons");
+            mmgr.requestMemberRemoval(mem1, "test reasons");
             msys.getLogWriter().info("<ExpectedException action=remove>Membership: requesting removal of </ExpectedException>");
             
             fail("It should have thrown exception in requestMemberRemoval");
@@ -1065,11 +995,6 @@ public class LocatorDUnitTest extends DistributedTestCase {
             assertTrue(
                 "This should have been ForceDisconnectException but found "
                     + cause, cause instanceof ForcedDisconnectException);
-          } finally {
-            c.getProtocolStack().stop();
-            c.getProtocolStack().destroy();
-//            c.getTestHook().reset();
-//            c.unregisterTestHook();
           }
         }
       });
@@ -1170,18 +1095,7 @@ public class LocatorDUnitTest extends DistributedTestCase {
             msys.getLogWriter().info("<ExpectedException action=add>service failure</ExpectedException>");
             msys.getLogWriter().info("<ExpectedException action=add>com.gemstone.gemfire.ForcedDisconnectException</ExpectedException>");
             msys.getLogWriter().info("<ExpectedException action=add>com.gemstone.gemfire.ConnectException</ExpectedException>");
-            MembershipManagerHelper.playDead(msys);
-            JChannel c = MembershipManagerHelper.getJChannel(msys);
-            Protocol udp = c.getProtocolStack().findProtocol("UDP");
-            udp.stop();
-            udp.passUp(new Event(Event.EXIT, new Exception("killing locator's ds")));
-            try {
-              MembershipManagerHelper.getJChannel(msys).waitForClose();
-            }
-            catch (InterruptedException ie) {
-              Thread.currentThread().interrupt();
-              // attempt rest of work with interrupt bit set
-            }
+            MembershipManagerHelper.crashDistributedSystem(msys);
             loc.stop();
           }
       };
@@ -1431,8 +1345,6 @@ public class LocatorDUnitTest extends DistributedTestCase {
 
     system = (InternalDistributedSystem)getSystem(props);
 
-    assertTrue(waitUntilFDConnected(30000)); // FD_SOCK must connect in order for normal-disconnect feature to be tested here
-    
     final DistributedMember coord = MembershipManagerHelper.getCoordinator(system);
     getLogWriter().info("coordinator before termination of locator is " + coord);
 
@@ -1520,34 +1432,6 @@ public class LocatorDUnitTest extends DistributedTestCase {
     return false;
   }
   
-  public static boolean anyNormalFDDisconnects() {
-    DistributedSystem sys = InternalDistributedSystem.getAnyInstance();
-    if (sys != null && sys.isConnected()) {
-      FD_SOCK fdProtocol = (FD_SOCK)MembershipManagerHelper.getJChannel(sys).getProtocolStack().findProtocol("FD_SOCK");
-      if (fdProtocol.normalDisconnectCount > 0) {
-        getLogWriter().warning("Found " + fdProtocol.normalDisconnectCount + " normal-status disconnects have happened.  Expected none since other members crashed");
-        return true;
-      }
-    }
-    return false;
-  }
-  
-  public static boolean waitUntilFDConnected(long timeout) {
-    DistributedSystem sys = InternalDistributedSystem.getAnyInstance();
-    if (sys != null && sys.isConnected()) {
-      FD_SOCK fdProtocol = (FD_SOCK)MembershipManagerHelper.getJChannel(sys).getProtocolStack().findProtocol("FD_SOCK");
-      long endTime = System.currentTimeMillis() + timeout;
-      while (!fdProtocol.isConnectedToPingDest && System.currentTimeMillis() < endTime) {
-        try {
-          Thread.sleep(1000);
-        } catch (InterruptedException e) {
-          Thread.currentThread().interrupt();
-        }
-      }
-      return fdProtocol.isConnectedToPingDest;
-    }
-    return false;
-  }
 
   static boolean beforeFailureNotificationReceived;
   static boolean afterFailureNotificationReceived;
@@ -1989,24 +1873,12 @@ public class LocatorDUnitTest extends DistributedTestCase {
         };
     vm0.invoke(connect);
     
-    int count = ((InternalLocator)locator).getLocatorHandler().getMemberCount();
-    Assert.assertTrue( count == 2, "The number of members in the discovery set should be 2 but is " + count);
-    
     getLogWriter().info("Stopping locator");
     locator.stop();
     
     getLogWriter().info("Starting locator");
     locator = Locator.startLocatorAndDS(port1, logFile, p);
     
-    // the count could be 2 or 3, depending on whether the locator happened to
-    // reuse the membership port that it had last time.  Having a count of 1
-    // means that it did not recover from disk and did not find the member in
-    // vm0
-    count = ((InternalLocator)locator).getLocatorHandler().getMemberCount();
-    Assert.assertTrue( count > 1,
-         "The number of members in the discovery set should be > 1 but is " + count
-         +"; locator recovery failed!");
-    
     vm0.invoke(new SerializableRunnable("disconnect") {
       public void run() {
         DistributedSystem.connect(p).disconnect();
@@ -2080,47 +1952,15 @@ public class LocatorDUnitTest extends DistributedTestCase {
     sys.getLogWriter().info("<ExpectedException action=add>com.gemstone.gemfire.ConnectException</ExpectedException>");
     sys.getLogWriter().info("<ExpectedException action=add>com.gemstone.gemfire.ForcedDisconnectException</ExpectedException>");
     try {
-      MembershipManagerHelper.playDead(sys);
-      JChannel c = MembershipManagerHelper.getJChannel(sys);
-      Protocol udp = c.getProtocolStack().findProtocol("UDP");
-      udp.stop();
-      udp.passUp(new Event(Event.EXIT, new Exception("killing locator's ds")));
-      MembershipManagerHelper.getJChannel(sys).waitForClose();
+      MembershipManagerHelper.crashDistributedSystem(sys);
     }
     catch (DistributedSystemDisconnectedException se) {
       // it's okay for the system to already be shut down
     }
-    catch (InterruptedException ie) {
-      Thread.currentThread().interrupt();
-      // attempt rest of work with interrupt bit set
-    }
     sys.getLogWriter().info("<ExpectedException action=remove>service failure</ExpectedException>");
     sys.getLogWriter().info("<ExpectedException action=remove>com.gemstone.gemfire.ForcedDisconnectException</ExpectedException>");
   }
 
-  //New test hook which blocks before closing channel.
-  class JChannelTestHook implements com.gemstone.org.jgroups.debug.JChannelTestHook {
-
-    volatile boolean unboundedWait = true;
-
-    public void reset() {
-      unboundedWait = false;
-    }
-
-
-    @Override
-    public void beforeChannelClosing(String string, Throwable cause) {
-      InternalDistributedSystem.getAnyInstance().getLogWriter().info("Inside JChannelTestHook.beforeChannelClosing with " + cause);
-    // stop here for a while and check for shutdown cause in
-    // JGroupsMembershipManager.
-      if (cause instanceof ForcedDisconnectException) {
-        while (unboundedWait) {
-          pause(500);
-        }
-      }
-    }
-
-  }
   
   class MyMembershipListener implements MembershipListener {
     boolean quorumLostInvoked;

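The recurring change in LocatorDUnitTest above is the collapse of the jgroups teardown sequence (playDead, fetch the JChannel, stop the UDP protocol, pass up an EXIT event, waitForClose) into a single helper call. A minimal sketch of the resulting DUnit runnable, using only names that appear in this diff and assuming the test class's existing imports:

    SerializableRunnable crashLocator = new SerializableRunnable("crash the locator's ds") {
      public void run() {
        Locator loc = Locator.getLocators().iterator().next();
        DistributedSystem msys = loc.getDistributedSystem();
        // one call replaces the manual playDead + JChannel/UDP shutdown + waitForClose block
        MembershipManagerHelper.crashDistributedSystem(msys);
        loc.stop();
      }
    };

crashDistributedSystem itself is added by the new MembershipManagerHelper further down in this commit.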
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/d2a942e8/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/LocatorJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/LocatorJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/LocatorJUnitTest.java
index 0aeacb3..db7c217 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/LocatorJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/LocatorJUnitTest.java
@@ -39,9 +39,6 @@ import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
 import com.gemstone.gemfire.internal.cache.tier.sockets.ClientProxyMembershipID;
 import com.gemstone.gemfire.management.internal.JmxManagerAdvisor.JmxManagerProfile;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
-import com.gemstone.org.jgroups.stack.GossipClient;
-import com.gemstone.org.jgroups.stack.GossipData;
-import com.gemstone.org.jgroups.stack.IpAddress;
 
 import dunit.DistributedTestCase;
 import dunit.DistributedTestCase.WaitCriterion;
@@ -105,37 +102,9 @@ public class LocatorJUnitTest {
     Assert.assertTrue(info.length > 1);
   }
 
-  public void _testPeerOnly() throws Exception {
-    locator = Locator.startLocator(port, tmpFile);
-    Assert.assertEquals(locator, Locator.getLocators().iterator().next());
-    Thread.sleep(1000);
-    final GossipClient client = new GossipClient(new IpAddress(InetAddress.getLocalHost(), port),  500);
-    client.register("mygroup1", new IpAddress(InetAddress.getLocalHost(), 55),5000, false);
-    WaitCriterion ev = new WaitCriterion() {
-      public boolean done() {
-        try {
-          Vector members = client.getMembers("mygroup1",
-              new IpAddress(InetAddress.getLocalHost(), 55), true,5000);
-          return members.size() == 1;
-        }
-        catch (Exception e) {
-          e.printStackTrace();
-          fail("unexpected exception");
-        }
-        return false; // NOTREACHED
-      }
-      public String description() {
-        return null;
-      }
-    };
-    DistributedTestCase.waitForCriterion(ev, 1000, 200, true);
-    Vector members = client.getMembers("mygroup1", new IpAddress(InetAddress.getLocalHost(), 55), true,5000);
-    Assert.assertEquals(1, members.size());
-    Assert.assertEquals(new IpAddress(InetAddress.getLocalHost(), 55), members.get(0));
-  }
-
   @Test
   public void testServerOnly() throws Exception {
+    fail("testServerOnly must be fixed for the jgroups replacement");
     Properties props = new Properties();
     props.setProperty("mcast-port", "0");
     props.setProperty(DistributionConfig.ENABLE_CLUSTER_CONFIGURATION_NAME, "false");
@@ -144,8 +113,9 @@ public class LocatorJUnitTest {
     Assert.assertTrue(locator.isServerLocator());
     Thread.sleep(1000);
     try {
-      GossipData request = new GossipData(GossipData.REGISTER_REQ, "group", new IpAddress(InetAddress.getLocalHost(), 55), null, null);
-      TcpClient.requestToServer(InetAddress.getLocalHost(), port, request, REQUEST_TIMEOUT);
+      // TODO fix this part of the test
+//      GossipData request = new GossipData(GossipData.REGISTER_REQ, "group", new IpAddress(InetAddress.getLocalHost(), 55), null, null);
+//      TcpClient.requestToServer(InetAddress.getLocalHost(), port, request, REQUEST_TIMEOUT);
       Assert.fail("Should have got an exception");
     } catch (Exception expected) {
 //      expected.printStackTrace();
@@ -186,15 +156,17 @@ public class LocatorJUnitTest {
   //TODO - test durable queue discovery, excluded servers, server groups.
 
   private void doGossip()  throws Exception {
-    final GossipClient client = new GossipClient(new IpAddress(InetAddress.getLocalHost(), port),  500);
-    client.register("mygroup1", new IpAddress(InetAddress.getLocalHost(), 55),5000, false);
+    fail("doGossip must be fixed for the jgroups replacement");
+    // TODO
+//    final GossipClient client = new GossipClient(new IpAddress(InetAddress.getLocalHost(), port),  500);
+//    client.register("mygroup1", new IpAddress(InetAddress.getLocalHost(), 55),5000, false);
     WaitCriterion ev = new WaitCriterion() {
       public boolean done() {
         try {
-          Vector members = client.getMembers("mygroup1",
-              new IpAddress(InetAddress.getLocalHost(), 55), true,5000);
-//          System.out.println("members in mygroup1: " + members);
-          return members.size() == 1;
+          // TODO
+//          Vector members = client.getMembers("mygroup1",
+//              new IpAddress(InetAddress.getLocalHost(), 55), true,5000);
+//          return members.size() == 1;
         }
         catch (Exception e) {
           e.printStackTrace();
@@ -207,8 +179,9 @@ public class LocatorJUnitTest {
       }
     };
     DistributedTestCase.waitForCriterion(ev, 1 * 1000, 200, true);
-    Vector members = client.getMembers("mygroup1", new IpAddress(InetAddress.getLocalHost(), 55), true,5000);
-    Assert.assertEquals(new IpAddress(InetAddress.getLocalHost(), 55), members.get(0));
+    // TODO
+//    Vector members = client.getMembers("mygroup1", new IpAddress(InetAddress.getLocalHost(), 55), true,5000);
+//    Assert.assertEquals(new IpAddress(InetAddress.getLocalHost(), 55), members.get(0));
   }
 
   private void doServerLocation() throws Exception {

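In LocatorJUnitTest the GossipClient/GossipData calls are commented out pending the new discovery API, but the dunit wait scaffolding around them is unchanged. A sketch of that scaffolding as used in doGossip, with a placeholder done() condition until the TODOs are resolved:

    WaitCriterion ev = new WaitCriterion() {
      public boolean done() {
        // TODO: query membership through the jgroups replacement once it is available
        return false;
      }
      public String description() {
        return "waiting for the registered member to appear";
      }
    };
    DistributedTestCase.waitForCriterion(ev, 1000, 200, true); // arguments as used elsewhere in this test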
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/d2a942e8/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/DistributionManagerDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/DistributionManagerDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/DistributionManagerDUnitTest.java
index cb1d1d2..5d7e9a0 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/DistributionManagerDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/DistributionManagerDUnitTest.java
@@ -33,12 +33,12 @@ import com.gemstone.gemfire.cache.util.CacheListenerAdapter;
 import com.gemstone.gemfire.distributed.DistributedMember;
 import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.distributed.internal.membership.InternalDistributedMember;
-import com.gemstone.gemfire.distributed.internal.membership.jgroup.JGroupMembershipManager;
-import com.gemstone.gemfire.distributed.internal.membership.jgroup.MembershipManagerHelper;
+import com.gemstone.gemfire.distributed.internal.membership.MembershipManager;
+import com.gemstone.gemfire.distributed.internal.membership.gms.MembershipManagerHelper;
+import com.gemstone.gemfire.distributed.internal.membership.gms.mgr.GMSMembershipManager;
 import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.internal.logging.LogService;
 import com.gemstone.gemfire.internal.tcp.Stub;
-import com.gemstone.org.jgroups.protocols.pbcast.GMS;
 
 import dunit.DistributedTestCase;
 import dunit.Host;
@@ -132,9 +132,10 @@ public class DistributionManagerDUnitTest extends DistributedTestCase {
    */
   public void testConnectAfterBeingShunned() {
     InternalDistributedSystem sys = getSystem();
-    JGroupMembershipManager mgr = MembershipManagerHelper.getMembershipManager(sys);
+    MembershipManager mgr = MembershipManagerHelper.getMembershipManager(sys);
     InternalDistributedMember idm = mgr.getLocalMember();
-    System.setProperty("gemfire.jg-bind-port", ""+idm.getPort());
+    // TODO GMS needs to have a system property allowing the bind-port to be set
+//    System.setProperty("gemfire.jg-bind-port", ""+idm.getPort());
     try {
       sys.disconnect();
       sys = getSystem();
@@ -162,7 +163,7 @@ public class DistributionManagerDUnitTest extends DistributedTestCase {
     VM vm0 = Host.getHost(0).getVM(0);
 
     InternalDistributedSystem sys = getSystem();
-    JGroupMembershipManager mgr = MembershipManagerHelper.getMembershipManager(sys);
+    MembershipManager mgr = MembershipManagerHelper.getMembershipManager(sys);
 
     try {
       InternalDistributedMember mbr = new InternalDistributedMember(
@@ -172,10 +173,10 @@ public class DistributionManagerDUnitTest extends DistributedTestCase {
       
       // if the view number isn't being recorded correctly the test will pass but the
       // functionality is broken
-      Assert.assertTrue("expected view ID to be greater than zero", mgr.getView().getViewNumber() > 0);
+      Assert.assertTrue("expected view ID to be greater than zero", mgr.getView().getViewId() > 0);
 
       int oldViewId = mbr.getVmViewId();
-      mbr.setVmViewId((int)mgr.getView().getViewNumber()-1);
+      mbr.setVmViewId((int)mgr.getView().getViewId()-1);
       getLogWriter().info("current membership view is " + mgr.getView());
       getLogWriter().info("created ID " + mbr + " with view ID " + mbr.getVmViewId());
       sys.getLogWriter().info("<ExpectedException action=add>attempt to add old member</ExpectedException>");
@@ -192,7 +193,8 @@ public class DistributionManagerDUnitTest extends DistributedTestCase {
       // now forcibly add it as a surprise member and show that it is reaped
       long gracePeriod = 5000;
       long startTime = System.currentTimeMillis();
-      long birthTime = startTime - mgr.getSurpriseMemberTimeout() + gracePeriod;
+      long timeout = ((GMSMembershipManager)mgr).getSurpriseMemberTimeout();
+      long birthTime = startTime - timeout + gracePeriod;
       MembershipManagerHelper.addSurpriseMember(sys, mbr, birthTime);
       assertTrue("Member was not a surprise member", mgr.isSurpriseMember(mbr));
       
@@ -204,7 +206,7 @@ public class DistributionManagerDUnitTest extends DistributedTestCase {
       };
       vm0.invoke(connectDisconnect);
       
-      if (birthTime < (System.currentTimeMillis() - mgr.getSurpriseMemberTimeout())) {
+      if (birthTime < (System.currentTimeMillis() - timeout)) {
         return; // machine is too busy and we didn't get enough CPU to perform more assertions
       }
       assertTrue("Member was incorrectly removed from surprise member set",
@@ -248,7 +250,7 @@ public class DistributionManagerDUnitTest extends DistributedTestCase {
     properties.put(DistributionConfig.LOG_LEVEL_NAME, getDUnitLogLevel());
 
     system = (InternalDistributedSystem)DistributedSystem.connect(properties);
-    JGroupMembershipManager mgr = MembershipManagerHelper.getMembershipManager(system);
+    MembershipManager mgr = MembershipManagerHelper.getMembershipManager(system);
     
     try {
       properties.remove(DistributionConfig.START_LOCATOR_NAME);
@@ -265,7 +267,8 @@ public class DistributionManagerDUnitTest extends DistributedTestCase {
       
       getLogWriter().info("test is setting slow view casting hook");
       // a new view won't be installed for 20 seconds
-      GMS.TEST_HOOK_SLOW_VIEW_CASTING=20;
+      fail("slow view casting must be implemented for the jgroups replacement");
+//      GMS.TEST_HOOK_SLOW_VIEW_CASTING=20;
       
       // show we can reconnect even though the old ID for this member is still
       // in the membership view.  Disconnecting will shut down the old DistributedSystem
@@ -289,7 +292,8 @@ public class DistributionManagerDUnitTest extends DistributedTestCase {
       });
     }
     finally {
-      GMS.TEST_HOOK_SLOW_VIEW_CASTING = 0;
+      //TODO
+//      GMS.TEST_HOOK_SLOW_VIEW_CASTING = 0;
       
       memberVM.invoke(new SerializableRunnable("disconnect") {
         public void run() {

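Two smaller API shifts in DistributionManagerDUnitTest are easy to miss: the view id accessor is getViewId() rather than the jgroups getViewNumber(), and the surprise-member timeout now requires a cast to the concrete GMSMembershipManager. A short sketch of the new access pattern, using the names from the hunks above and assuming the test class's existing imports:

    MembershipManager mgr = MembershipManagerHelper.getMembershipManager(sys);
    int viewId = (int) mgr.getView().getViewId();        // formerly getViewNumber() on the jgroups view
    long timeout = ((GMSMembershipManager) mgr).getSurpriseMemberTimeout(); // cast now required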
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/d2a942e8/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/DistributionManagerTimeDUnitDisabledTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/DistributionManagerTimeDUnitDisabledTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/DistributionManagerTimeDUnitDisabledTest.java
deleted file mode 100644
index 9eb088f..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/DistributionManagerTimeDUnitDisabledTest.java
+++ /dev/null
@@ -1,211 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-/**
- * 
- */
-package com.gemstone.gemfire.distributed.internal;
-
-import java.util.Map;
-
-import org.junit.Ignore;
-
-import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
-import com.gemstone.gemfire.distributed.internal.membership.jgroup.MembershipManagerHelper;
-import com.gemstone.org.jgroups.Address;
-import com.gemstone.org.jgroups.JChannel;
-import com.gemstone.org.jgroups.protocols.GemFireTimeSync;
-import com.gemstone.org.jgroups.protocols.GemFireTimeSync.GFTimeSyncHeader;
-import com.gemstone.org.jgroups.protocols.GemFireTimeSync.TestHook;
-import com.gemstone.org.jgroups.stack.Protocol;
-
-import dunit.DistributedTestCase;
-import dunit.Host;
-import dunit.SerializableCallable;
-import dunit.VM;
-
-/**
- * The dunit test is testing time offset set at
- * {@link DistributionManager#cacheTimeDelta}
- * @author shobhit
- *
- */
-@Ignore("Disabled for bug 52348")
-public class DistributionManagerTimeDUnitDisabledTest extends DistributedTestCase {
-
-  public final int SKEDNESS = 10;
-  
-  /**
-   * @param name
-   */
-  public DistributionManagerTimeDUnitDisabledTest(String name) {
-    super(name);
-  }
-
-  public void testDistributionManagerTimeSync() {
-    disconnectAllFromDS();
-
-    Host host = Host.getHost(0);
-    VM vm0 = host.getVM(0);
-    VM vm1 = host.getVM(1);
-    VM vm2 = host.getVM(2);
-    
-    // Start distributed system in all VMs.
-    
-    long vmtime0 = (Long) vm0.invoke(new SerializableCallable() {
-      
-      @Override
-      public Object call() throws Exception {
-        InternalDistributedSystem system = getSystem();
-        long timeOffset = system.getClock().getCacheTimeOffset();
-        return timeOffset;
-      }
-    });
-    
-    long vmtime1 = (Long) vm1.invoke(new SerializableCallable() {
-      
-      @Override
-      public Object call() throws Exception {
-        
-        InternalDistributedSystem system = getSystem();
-        long timeOffset = system.getClock().getCacheTimeOffset();
-        return timeOffset;
-      }
-    });
-    
-    long vmtime2 = (Long) vm2.invoke(new SerializableCallable() {
-      
-      @Override
-      public Object call() throws Exception {
-        
-        InternalDistributedSystem system = getSystem();
-        long timeOffset = system.getClock().getCacheTimeOffset();
-        return timeOffset;
-      }
-    });
-
-    getLogWriter().info("Offsets for VM0: " + vmtime0 + " VM1: " + vmtime1 + " and VM2: " +vmtime2);
-
-    // verify if they are skewed by more than 1 milli second.
-    int diff1 = (int) (vmtime0 - vmtime1);
-    int diff2 = (int) (vmtime1 - vmtime2);
-    int diff3 = (int) (vmtime2 - vmtime0);
-    
-    if ((diff1 > SKEDNESS || diff1 < -SKEDNESS) || (diff2 > SKEDNESS || diff2 < -SKEDNESS) || (diff3 > SKEDNESS || diff3 < -SKEDNESS)) {
-      fail("Clocks are skewed by more than " + SKEDNESS + " ms");
-    }
-  }
-
-  public void testDistributionManagerTimeSyncAfterJoinDone() {
-    disconnectAllFromDS();
-    
-    Host host = Host.getHost(0);
-    VM vm0 = host.getVM(0);
-    VM vm1 = host.getVM(1);
-    VM vm2 = host.getVM(2);
-    
-    // Start distributed system in all VMs.
-    
-    vm0.invoke(new CacheSerializableRunnable("Starting vm0") {
-      @Override
-      public void run2() {
-        getSystem();
-      }
-    });
-    
-    vm1.invoke(new CacheSerializableRunnable("Starting vm1") {
-      @Override
-      public void run2() {
-        getSystem();
-      }
-    });
-
-    vm2.invoke(new CacheSerializableRunnable("Starting vm2") {
-      @Override
-      public void run2() {
-        getSystem();
-      }
-    });
-    
-    long vmtime0 = (Long) getTimeOffset(vm0);    
-    long vmtime1 = (Long) getTimeOffset(vm1);    
-    long vmtime2 = (Long) getTimeOffset(vm2);
-    
-    getLogWriter().info("Offsets for VM0: " + vmtime0 + " VM1: " + vmtime1 + " and VM2: " +vmtime2);
-
-    // verify if they are skewed by more than 1 milli second.
-    int diff1 = (int) (vmtime0 - vmtime1);
-    int diff2 = (int) (vmtime1 - vmtime2);
-    int diff3 = (int) (vmtime2 - vmtime0);
-    
-    if ((diff1 > SKEDNESS || diff1 < -SKEDNESS) || (diff2 > SKEDNESS || diff2 < -SKEDNESS) || (diff3 > SKEDNESS || diff3 < -SKEDNESS)) {
-      fail("Clocks are skewed by more than " + SKEDNESS + " ms");
-    }
-  }
-
-  public Object getTimeOffset(VM vm) {
-    return vm.invoke(new SerializableCallable() {
-      
-      @Override
-      public Object call() throws Exception {
-        InternalDistributedSystem system = getSystem();
-        JChannel jchannel = MembershipManagerHelper.getJChannel(system);
-
-        final UnitTestHook gftsTestHook = new UnitTestHook();
-        Protocol prot = jchannel.getProtocolStack().findProtocol("GemFireTimeSync");
-        GemFireTimeSync gts = (GemFireTimeSync)prot;
-        gts.setTestHook(gftsTestHook);
-        //Let the syncMessages reach to all VMs for new offsets.
-        waitForCriterion(new WaitCriterion() {
-          
-          @Override
-          public boolean done() {
-            return gftsTestHook.getBarrier() == GemFireTimeSync.OFFSET_RESPONSE;
-          }
-          
-          @Override
-          public String description() {
-            return "Waiting for this node to get time offsets from co-ordinator";
-          }
-        }, 500, 50, false);
-        
-        
-        long timeOffset = system.getClock().getCacheTimeOffset();
-        gts.setTestHook(null);
-        
-        return timeOffset;
-      }
-    });
-  }
-
-  public class UnitTestHook implements TestHook {
-
-    private int barrier = -1;
-
-    @Override
-    public void hook(int barr) {
-      this.barrier = barr;
-    }
-
-    @Override
-    public void setResponses(Map<Address, GFTimeSyncHeader> responses,
-        long currentTime) {
-    }
-
-    public Map<Address, GFTimeSyncHeader> getResponses() {
-      return null;
-    }
-
-    public long getCurTime() {
-      return 0;
-    }
-
-    public int getBarrier() {
-      return barrier;
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/d2a942e8/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/GemFireTimeSyncServiceDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/GemFireTimeSyncServiceDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/GemFireTimeSyncServiceDUnitTest.java
deleted file mode 100644
index 27211a7..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/GemFireTimeSyncServiceDUnitTest.java
+++ /dev/null
@@ -1,260 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-/**
- * 
- */
-package com.gemstone.gemfire.distributed.internal;
-
-import java.io.File;
-import java.io.IOException;
-import java.net.ConnectException;
-import java.net.InetAddress;
-import java.util.Properties;
-
-import com.gemstone.gemfire.cache.CacheException;
-import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
-import com.gemstone.gemfire.distributed.DistributedMember;
-import com.gemstone.gemfire.distributed.DistributedSystem;
-import com.gemstone.gemfire.distributed.Locator;
-import com.gemstone.gemfire.distributed.internal.membership.jgroup.MembershipManagerHelper;
-import com.gemstone.gemfire.internal.AvailablePort;
-import com.gemstone.org.jgroups.JChannel;
-import com.gemstone.org.jgroups.protocols.GemFireTimeSync;
-import com.gemstone.org.jgroups.stack.Protocol;
-
-import dunit.DistributedTestCase;
-import dunit.Host;
-import dunit.SerializableCallable;
-import dunit.VM;
-
-/**
- * @author shobhit
- *
- */
-public class GemFireTimeSyncServiceDUnitTest extends DistributedTestCase {
-  
-  /**
-   * @param name
-   */
-  public GemFireTimeSyncServiceDUnitTest(String name) {
-    super(name);
-  }
-
-  /**
-   * After coordinator's sudden death and restart, this test verifies if
-   * {@link GemFireTimeSync} service thread is stopped in old coordinator or
-   * not.
-   */
-  public void testCoordinatorSyncThreadCancellation() {
-    Host host = Host.getHost(0);
-    VM vm0 = host.getVM(0); // Locator for this Test DS.
-    VM vm1 = host.getVM(1); // Peer member.
-    VM vm2 = host.getVM(2); // Peer member.
-
-    final int locatorPort = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
-    final String host0 = getServerHostName(host); 
-    
-    final Properties props = new Properties();
-    props.setProperty("locators", host0 + "[" + locatorPort + "]");
-    props.setProperty("mcast-port", "0");
-    props.setProperty("jmx-manager", "false");
-    props.setProperty("enable-network-partition-detection", "true");
-    props.setProperty("log-level", getDUnitLogLevel());
-    props.put("member-timeout", "2000");
-    props.setProperty(DistributionConfig.ENABLE_CLUSTER_CONFIGURATION_NAME, "false");
-    
-    try {
-      // Start distributed system in vm0(locator) vm1(data node).
-      vm0.invoke(new CacheSerializableRunnable("Starting vm0") {
-        @Override
-        public void run2() {
-  
-          // Disconnect from any existing DS.
-          try {
-            system.disconnect();
-          } catch (Exception ex) {
-            // Let it go.
-          }
-          File myLocatorLogFile = new File("locator-"+locatorPort+".log"); 
-  
-          try {
-            Locator.startLocatorAndDS(locatorPort, myLocatorLogFile, props);
-          } catch (IOException e) {
-            fail("New locator startup failed on port: "+locatorPort, e);
-          }
-  
-        }
-      });
-   
-      // Add a new member to trigger VIEW_CHANGE.
-      vm1.invoke(new CacheSerializableRunnable("Starting vm1") {
-        @Override
-        public void run2() {
-          // Disconnect from any existing DS.
-          disconnectFromDS();
-          
-          DistributedSystem.connect(props);
-        }
-      });
-      
-      // Add a new member to trigger VIEW_CHANGE.
-      vm2.invoke(new CacheSerializableRunnable("Starting vm1") {
-        @Override
-        public void run2() {
-          // Disconnect from any existing DS.
-          disconnectFromDS();
-          
-          DistributedSystem.connect(props);
-        }
-      });
-
-      // Make current coordinator die.
-      vm0.invoke(new CacheSerializableRunnable("Shutdown my locator") {
-        
-        @Override
-        public void run2() throws CacheException {
-          Locator loc = InternalLocator.getLocators().iterator().next();
-          InternalDistributedSystem system = InternalDistributedSystem.getAnyInstance();
-          JChannel jchannel = MembershipManagerHelper.getJChannel(system);
-          Protocol prot = jchannel.getProtocolStack().findProtocol("GemFireTimeSync");
-          GemFireTimeSync gts = (GemFireTimeSync)prot;
-  
-          // Verify if Time Service is running.
-          assertFalse(gts.isServiceThreadCancelledForTest());
-  
-          loc.stop();
-          system.disconnect();
-        }
-      });
-  
-      // VM1 must become new coordinator.
-      vm1.invoke(new CacheSerializableRunnable("Verify vm1 coordinator") {
-        @Override
-        public void run2() {
-          InternalDistributedSystem system = InternalDistributedSystem.getAnyInstance();
-          DistributedMember coord = MembershipManagerHelper.getCoordinator(system);
-          
-          assertTrue(coord.equals(system.getDistributedMember()));
-        }
-      });
-
-      // VM1 must become new coordinator.
-      vm1.invoke(new CacheSerializableRunnable("Verify vm1 coordinator") {
-        @Override
-        public void run2() {
-          InternalDistributedSystem system = InternalDistributedSystem.getAnyInstance();
-          DistributedMember coord = MembershipManagerHelper.getCoordinator(system);
-          
-          assertTrue(coord.equals(system.getDistributedMember()));
-  
-          JChannel jchannel = MembershipManagerHelper.getJChannel(system);
-          Protocol prot = jchannel.getProtocolStack().findProtocol("GemFireTimeSync");
-          GemFireTimeSync gts = (GemFireTimeSync)prot;
-  
-          int maxAttempts =20;
-          while (maxAttempts > 0 && gts.isServiceThreadCancelledForTest()) {
-            maxAttempts--;
-            pause(100);
-          }
-  
-          // Verify if Time Service is running.
-          assertFalse(gts.isServiceThreadCancelledForTest());
-        }
-      });
-
-      vm0.invoke(new CacheSerializableRunnable("Restart my locator and verify it's coordinator again") {
-        
-        @Override
-        public void run2() throws CacheException {
-          try {
-            Locator.startLocatorAndDS(locatorPort, null, props);
-          } catch (IOException e) {
-            fail("Restart of new locator failed on port: "+locatorPort, e);
-          }
-          
-          InternalDistributedSystem system = InternalDistributedSystem.getAnyInstance();
-          DistributedMember coord = MembershipManagerHelper.getCoordinator(system);
-          
-          assertTrue(coord.equals(system.getDistributedMember()));
-          
-          JChannel jchannel = MembershipManagerHelper.getJChannel(system);
-          Protocol prot = jchannel.getProtocolStack().findProtocol("GemFireTimeSync");
-          GemFireTimeSync gts = (GemFireTimeSync)prot;
-  
-          int maxAttempts = 20;
-          while (maxAttempts > 0 && gts.isServiceThreadCancelledForTest()) {
-            maxAttempts--;
-            pause(100);
-          }
-          assertFalse(gts.isServiceThreadCancelledForTest());
-        }
-      });
-  
-      // After receiving VIEW_CHANGE locator should cancel its GemFireTimeSync service thread.
-      vm1.invoke(new CacheSerializableRunnable("Verify vm1 is not coordinator") {
-        
-        @Override
-        public void run2() throws CacheException {
-          InternalDistributedSystem system = InternalDistributedSystem.getAnyInstance();
-  
-          JChannel jchannel = MembershipManagerHelper.getJChannel(system);
-          Protocol prot = jchannel.getProtocolStack().findProtocol("GemFireTimeSync");
-          GemFireTimeSync gts = (GemFireTimeSync)prot;
-  
-          // Verify if Time Service is NOT running and service thread is cancelled.
-          assertTrue(gts.isServiceThreadCancelledForTest());
-        }
-      });
-      
-    } catch (Exception ex) {
-      fail("Test failed!", ex);
-    } finally {
-      // Shutdown locator and clean vm0 for other tests.
-      vm0.invoke(new CacheSerializableRunnable("Shutdown locator") {
-        
-        @Override
-        public void run2() throws CacheException {
-          try {
-            InternalDistributedSystem system = InternalDistributedSystem.getConnectedInstance();
-            if (system != null)
-              system.disconnect();
-          } catch (Exception e) {
-            fail("Stoping locator failed", e);
-          }
-        }
-      });
-      vm1.invoke(new CacheSerializableRunnable("Shutdown vm1") {
-        
-        @Override
-        public void run2() throws CacheException {
-          try {
-            InternalDistributedSystem system = InternalDistributedSystem.getConnectedInstance();
-            if (system != null)
-              system.disconnect();
-          } catch (Exception e) {
-            fail("Stoping vm1 failed", e);
-          }
-        }
-      });
-      
-      vm2.invoke(new CacheSerializableRunnable("Shutdown vm2") {
-        
-        @Override
-        public void run2() throws CacheException {
-          try {
-            InternalDistributedSystem system = InternalDistributedSystem.getConnectedInstance();
-            if (system != null)
-              system.disconnect();
-          } catch (Exception e) {
-            fail("Stoping vm2 failed", e);
-          }
-        }
-      });
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/d2a942e8/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/MembershipJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/MembershipJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/MembershipJUnitTest.java
index f941e5e..778ba3f 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/MembershipJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/MembershipJUnitTest.java
@@ -7,20 +7,11 @@
  */
 package com.gemstone.gemfire.distributed.internal.membership;
 
-import java.util.HashSet;
-import java.util.Set;
-import java.util.Vector;
+import junit.framework.TestCase;
 
 import org.junit.experimental.categories.Category;
 
-import com.gemstone.gemfire.distributed.internal.DistributionManager;
 import com.gemstone.gemfire.test.junit.categories.UnitTest;
-import com.gemstone.org.jgroups.View;
-import com.gemstone.org.jgroups.ViewId;
-import com.gemstone.org.jgroups.protocols.pbcast.GMS;
-import com.gemstone.org.jgroups.stack.IpAddress;
-
-import junit.framework.TestCase;
 
 @Category(UnitTest.class)
 public class MembershipJUnitTest extends TestCase {
@@ -42,58 +33,61 @@ public class MembershipJUnitTest extends TestCase {
    * @throws Exception
    */
   public void testFailedWeight() throws Exception {
-    // in #47342 a new view was created that contained a member that was joining but
-    // was no longer reachable.  The member was included in the failed-weight and not
-    // in the previous view-weight, causing a spurious network partition to be declared
-    IpAddress members[] = new IpAddress[] {
-        new IpAddress("localhost", 1), new IpAddress("localhost", 2), new IpAddress("localhost", 3),
-        new IpAddress("localhost", 4), new IpAddress("localhost", 5), new IpAddress("localhost", 6)};
-    int i = 0;
-    // weight 3
-    members[i].setVmKind(DistributionManager.LOCATOR_DM_TYPE);
-    members[i++].shouldntBeCoordinator(false);
-    // weight 3
-    members[i].setVmKind(DistributionManager.LOCATOR_DM_TYPE);
-    members[i++].shouldntBeCoordinator(false);
-    // weight 15 (cache+leader)
-    members[i].setVmKind(DistributionManager.NORMAL_DM_TYPE);
-    members[i++].shouldntBeCoordinator(true);
-    // weight 0
-    members[i].setVmKind(DistributionManager.ADMIN_ONLY_DM_TYPE);
-    members[i++].shouldntBeCoordinator(true);
-    // weight 0
-    members[i].setVmKind(DistributionManager.ADMIN_ONLY_DM_TYPE);
-    members[i++].shouldntBeCoordinator(true);
-    // weight 10
-    members[i].setVmKind(DistributionManager.NORMAL_DM_TYPE);
-    members[i++].shouldntBeCoordinator(true);
-    
-    ViewId vid = new ViewId(members[0], 4);
-    Vector<IpAddress> vmbrs = new Vector<IpAddress>();
-    for (i=0; i<members.length; i++) {
-      vmbrs.add(members[i]);
-    }
-    View lastView = new View(vid, vmbrs);
-    IpAddress leader = members[2];
-    assertTrue(!leader.preferredForCoordinator());
-    
-    IpAddress joiningMember = new IpAddress("localhost", 7);
-    joiningMember.setVmKind(DistributionManager.NORMAL_DM_TYPE);
-    joiningMember.shouldntBeCoordinator(true);
-    
-    // have the joining member and another cache process (weight 10) in the failed members
-    // collection and check to make sure that the joining member is not included in failed
-    // weight calcs.
-    Set<IpAddress> failedMembers = new HashSet<IpAddress>();
-    failedMembers.add(joiningMember);
-    failedMembers.add(members[members.length-1]); // cache
-    failedMembers.add(members[members.length-2]); // admin
-    int failedWeight = GMS.processFailuresAndGetWeight(lastView, leader, failedMembers);
-//    System.out.println("last view = " + lastView);
-//    System.out.println("failed mbrs = " + failedMembers);
-//    System.out.println("failed weight = " + failedWeight);
-    assertEquals("failure weight calculation is incorrect", 10, failedWeight);
-    assertTrue(!failedMembers.contains(members[members.length-2]));
+    fail("testFailedWeight must be reimplemented for jgroups replacement");
+  }
+  public void _testFailedWeight() throws Exception {
+//    // in #47342 a new view was created that contained a member that was joining but
+//    // was no longer reachable.  The member was included in the failed-weight and not
+//    // in the previous view-weight, causing a spurious network partition to be declared
+//    IpAddress members[] = new IpAddress[] {
+//        new IpAddress("localhost", 1), new IpAddress("localhost", 2), new IpAddress("localhost", 3),
+//        new IpAddress("localhost", 4), new IpAddress("localhost", 5), new IpAddress("localhost", 6)};
+//    int i = 0;
+//    // weight 3
+//    members[i].setVmKind(DistributionManager.LOCATOR_DM_TYPE);
+//    members[i++].shouldntBeCoordinator(false);
+//    // weight 3
+//    members[i].setVmKind(DistributionManager.LOCATOR_DM_TYPE);
+//    members[i++].shouldntBeCoordinator(false);
+//    // weight 15 (cache+leader)
+//    members[i].setVmKind(DistributionManager.NORMAL_DM_TYPE);
+//    members[i++].shouldntBeCoordinator(true);
+//    // weight 0
+//    members[i].setVmKind(DistributionManager.ADMIN_ONLY_DM_TYPE);
+//    members[i++].shouldntBeCoordinator(true);
+//    // weight 0
+//    members[i].setVmKind(DistributionManager.ADMIN_ONLY_DM_TYPE);
+//    members[i++].shouldntBeCoordinator(true);
+//    // weight 10
+//    members[i].setVmKind(DistributionManager.NORMAL_DM_TYPE);
+//    members[i++].shouldntBeCoordinator(true);
+//    
+//    ViewId vid = new ViewId(members[0], 4);
+//    Vector<IpAddress> vmbrs = new Vector<IpAddress>();
+//    for (i=0; i<members.length; i++) {
+//      vmbrs.add(members[i]);
+//    }
+//    View lastView = new View(vid, vmbrs);
+//    IpAddress leader = members[2];
+//    assertTrue(!leader.preferredForCoordinator());
+//    
+//    IpAddress joiningMember = new IpAddress("localhost", 7);
+//    joiningMember.setVmKind(DistributionManager.NORMAL_DM_TYPE);
+//    joiningMember.shouldntBeCoordinator(true);
+//    
+//    // have the joining member and another cache process (weight 10) in the failed members
+//    // collection and check to make sure that the joining member is not included in failed
+//    // weight calcs.
+//    Set<IpAddress> failedMembers = new HashSet<IpAddress>();
+//    failedMembers.add(joiningMember);
+//    failedMembers.add(members[members.length-1]); // cache
+//    failedMembers.add(members[members.length-2]); // admin
+//    int failedWeight = GMS.processFailuresAndGetWeight(lastView, leader, failedMembers);
+////    System.out.println("last view = " + lastView);
+////    System.out.println("failed mbrs = " + failedMembers);
+////    System.out.println("failed weight = " + failedWeight);
+//    assertEquals("failure weight calculation is incorrect", 10, failedWeight);
+//    assertTrue(!failedMembers.contains(members[members.length-2]));
   }
 
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/d2a942e8/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/gms/MembershipManagerHelper.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/gms/MembershipManagerHelper.java b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/gms/MembershipManagerHelper.java
new file mode 100644
index 0000000..e0a8749
--- /dev/null
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/gms/MembershipManagerHelper.java
@@ -0,0 +1,154 @@
+/*=========================================================================
+ * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
+ * This product is protected by U.S. and international copyright
+ * and intellectual property laws. Pivotal products are covered by
+ * one or more patents listed at http://www.pivotal.io/patents.
+ *=========================================================================
+ */
+package com.gemstone.gemfire.distributed.internal.membership.gms;
+
+import com.gemstone.gemfire.CancelException;
+import com.gemstone.gemfire.ForcedDisconnectException;
+import com.gemstone.gemfire.distributed.DistributedMember;
+import com.gemstone.gemfire.distributed.DistributedSystem;
+import com.gemstone.gemfire.distributed.internal.DistributionManager;
+import com.gemstone.gemfire.distributed.internal.InternalDistributedSystem;
+import com.gemstone.gemfire.distributed.internal.membership.InternalDistributedMember;
+import com.gemstone.gemfire.distributed.internal.membership.MembershipManager;
+import com.gemstone.gemfire.distributed.internal.membership.gms.interfaces.Manager;
+import com.gemstone.gemfire.distributed.internal.membership.gms.mgr.GMSMembershipManager;
+
+import dunit.DistributedTestCase;
+import dunit.DistributedTestCase.WaitCriterion;
+
+/**
+ * This helper class provides access to membership manager information that
+ * is not otherwise public
+ * @author bruce
+ * @since 5.5
+ */
+public class MembershipManagerHelper
+{
+
+  /** returns the MembershipManager for the given distributed system */
+  public static MembershipManager getMembershipManager(DistributedSystem sys) {
+    InternalDistributedSystem isys = (InternalDistributedSystem)sys;
+    DistributionManager dm = (DistributionManager)isys.getDistributionManager();
+    MembershipManager mgr = dm.getMembershipManager();
+    return mgr;
+  }
+  
+  /** act sick.  don't accept new connections and don't process ordered
+   * messages.  Use beHealthyMember() to reverse the effects.<p>
+   * Note that part of beSickMember's processing is to interrupt and
+   * stop any reader threads.  A slow listener in a reader thread should
+   * eat this interrupt.
+   * @param sys
+   */
+  public static void beSickMember(DistributedSystem sys) {
+    ((Manager)getMembershipManager(sys)).beSick();
+  }
+  
+  /**
+   * inhibit failure detection responses.  This can be used in conjunction
+   * with beSickMember
+   */
+  public static void playDead(DistributedSystem sys) {
+    try {
+      ((Manager)getMembershipManager(sys)).playDead();
+    }
+    catch (CancelException e) {
+      // really dead is as good as playing dead
+    }
+  }
+  
+  public static void beHealthyMember(DistributedSystem sys) {
+    ((Manager)getMembershipManager(sys)).beHealthy();
+  }
+  
+  /** returns the current coordinator address */
+  public static DistributedMember getCoordinator(DistributedSystem sys) {
+    return ((Manager)getMembershipManager(sys)).getCoordinator();
+  }
+
+  /** returns the current lead member address */
+  public static DistributedMember getLeadMember(DistributedSystem sys) {
+    return ((Manager)getMembershipManager(sys)).getLeadMember();
+  }
+  
+  /** register a test hook with the manager */
+  public static void addTestHook(DistributedSystem sys,
+      com.gemstone.gemfire.distributed.internal.membership.MembershipTestHook hook) {
+    getMembershipManager(sys).registerTestHook(hook);
+  }
+  
+  /** remove a registered test hook */
+  public static void removeTestHook(DistributedSystem sys,
+      com.gemstone.gemfire.distributed.internal.membership.MembershipTestHook hook) {
+    getMembershipManager(sys).unregisterTestHook(hook);
+  }
+  
+//  /**
+//   * returns the view lock.  Holding this lock will prevent the processing
+//   * of new views, and will prevent other threads from being able to access
+//   * the view
+//   */
+//  public static Object getViewLock(DistributedSystem sys) {
+//    return getMembershipManager(sys).latestViewLock;
+//  }
+  
+  /** returns true if the given member is shunned */
+  public static boolean isShunned(DistributedSystem sys, DistributedMember mbr) {
+    return ((Manager)getMembershipManager(sys)).isShunned(mbr);
+  }
+  
+  /** returns true if the given member is a surprise member */
+  public static boolean isSurpriseMember(DistributedSystem sys, DistributedMember mbr) {
+    return getMembershipManager(sys).isSurpriseMember(mbr);
+  }
+  
+  /**
+   * add a member id to the surprise members set, with the given millisecond
+   * clock birth time
+   */
+  public static void addSurpriseMember(DistributedSystem sys,
+      DistributedMember mbr, long birthTime) {
+    ((Manager)getMembershipManager(sys)).addSurpriseMemberForTesting(mbr, birthTime);
+  }
+
+  /**
+   * inhibits/enables logging of forced-disconnect messages.
+   * For quorum-lost messages this adds expected-exception annotations
+   * before and after the messages to make them invisible to greplogs
+   */
+  public static void inhibitForcedDisconnectLogging(boolean b) {
+    GMSMembershipManager.inhibitForcedDisconnectLogging(b);
+  }
+  
+  /**
+   * wait for a member to leave the view.  Throws an AssertionError
+   * if the timeout period elapses before the member leaves
+   */
+  public static void waitForMemberDeparture(final DistributedSystem sys, 
+      final DistributedMember member, final long timeout) {
+    WaitCriterion ev = new WaitCriterion() {
+      public boolean done() {
+        return !getMembershipManager(sys).getView().contains((InternalDistributedMember)member);
+      }
+      public String description() {
+        String assMsg = "Waited over " + timeout + " ms for " + member 
+            + " to depart, but it didn't";
+        return assMsg;
+      }
+    };
+    DistributedTestCase.waitForCriterion(ev, timeout, 200, true);
+  }
+  
+  public static void crashDistributedSystem(final DistributedSystem msys) {
+    MembershipManagerHelper.inhibitForcedDisconnectLogging(true);
+    MembershipManagerHelper.playDead(msys);
+    getMembershipManager(msys).uncleanShutdown("test is forcing disconnect", new ForcedDisconnectException("test is forcing disconnect"));
+    MembershipManagerHelper.inhibitForcedDisconnectLogging(false);
+  }
+    
+}
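
[Editor's note: the new gms helper keeps the same static surface as the jgroup helper it replaces (deleted below), minus the JChannel-specific hooks, so most callers only need an import change. For orientation, a minimal usage sketch, assuming a dunit test that already holds a connected DistributedSystem for a surviving member, a second system to kill, and that member's id; "survivor", "victim" and "victimId" are placeholder names, not part of this commit:

  import com.gemstone.gemfire.distributed.DistributedMember;
  import com.gemstone.gemfire.distributed.DistributedSystem;
  import com.gemstone.gemfire.distributed.internal.membership.gms.MembershipManagerHelper;

  // force-disconnect one member and wait for the survivor to see it leave the view
  static void crashAndAwaitDeparture(DistributedSystem survivor,
      DistributedSystem victim, DistributedMember victimId, long timeoutMs) {
    MembershipManagerHelper.crashDistributedSystem(victim);                        // plays dead, then unclean shutdown
    MembershipManagerHelper.waitForMemberDeparture(survivor, victimId, timeoutMs); // throws if still in the view at timeout
  }
]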

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/d2a942e8/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/jgroup/MembershipManagerHelper.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/jgroup/MembershipManagerHelper.java b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/jgroup/MembershipManagerHelper.java
deleted file mode 100644
index 00c4042..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/jgroup/MembershipManagerHelper.java
+++ /dev/null
@@ -1,180 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-package com.gemstone.gemfire.distributed.internal.membership.jgroup;
-
-import com.gemstone.gemfire.CancelException;
-import com.gemstone.gemfire.distributed.DistributedMember;
-import com.gemstone.gemfire.distributed.DistributedSystem;
-import com.gemstone.gemfire.distributed.internal.DistributionManager;
-import com.gemstone.gemfire.distributed.internal.InternalDistributedSystem;
-import com.gemstone.org.jgroups.Event;
-import com.gemstone.org.jgroups.JChannel;
-import com.gemstone.org.jgroups.stack.Protocol;
-
-import dunit.DistributedTestCase;
-import dunit.DistributedTestCase.WaitCriterion;
-
-/**
- * This helper class provides access to membership manager information that
- * is not other wise public
- * @author bruce
- * @since 5.5
- */
-public class MembershipManagerHelper
-{
-  /** returns the JGroups channel for the given distributed system */
-  public static JChannel getJChannel(DistributedSystem sys) {
-    return getMembershipManager(sys).channel;
-  }
-
-  /** returns the JGroupMembershipManager for the given distributed system */
-  public static JGroupMembershipManager getMembershipManager(DistributedSystem sys) {
-    InternalDistributedSystem isys = (InternalDistributedSystem)sys;
-    DistributionManager dm = (DistributionManager)isys.getDistributionManager();
-    JGroupMembershipManager mgr = (JGroupMembershipManager)dm.getMembershipManager();
-    return mgr;
-  }
-  
-  /** act sick.  don't accept new connections and don't process ordered
-   * messages.  Use beHealthyMember() to reverse the effects.<p>
-   * Note that part of beSickMember's processing is to interrupt and
-   * stop any reader threads.  A slow listener in a reader thread should
-   * eat this interrupt.
-   * @param sys
-   */
-  public static void beSickMember(DistributedSystem sys) {
-    getMembershipManager(sys).beSick();
-  }
-  
-  /**
-   * inhibit failure detection responses.  This can be used in conjunction
-   * with beSickMember
-   */
-  public static void playDead(DistributedSystem sys) {
-    try {
-      getMembershipManager(sys).playDead();
-    }
-    catch (CancelException e) {
-      // really dead is as good as playing dead
-    }
-  }
-  
-  public static void beHealthyMember(DistributedSystem sys) {
-    getMembershipManager(sys).beHealthy();
-  }
-  
-  /** returns the current coordinator address */
-  public static DistributedMember getCoordinator(DistributedSystem sys) {
-    return getMembershipManager(sys).getCoordinator();
-  }
-
-  /** returns the current lead member address */
-  public static DistributedMember getLeadMember(DistributedSystem sys) {
-    return getMembershipManager(sys).getLeadMember();
-  }
-  
-  /** register a test hook with the manager */
-  public static void addTestHook(DistributedSystem sys,
-      com.gemstone.gemfire.distributed.internal.membership.MembershipTestHook hook) {
-    getMembershipManager(sys).registerTestHook(hook);
-  }
-  
-  /** remove a registered test hook */
-  public static void removeTestHook(DistributedSystem sys,
-      com.gemstone.gemfire.distributed.internal.membership.MembershipTestHook hook) {
-    getMembershipManager(sys).unregisterTestHook(hook);
-  }
-  
-  /** register a test hook with the manager */
-  public static void addTestHook(DistributedSystem sys,
-      com.gemstone.org.jgroups.debug.JChannelTestHook hook) {
-    getMembershipManager(sys).registerTestHook(hook);
-  }
-  
-  /** remove a registered test hook */
-  public static void removeTestHook(DistributedSystem sys,
-      com.gemstone.org.jgroups.debug.JChannelTestHook hook) {
-    getMembershipManager(sys).unregisterTestHook(hook);
-  }
-  
-//  /**
-//   * returns the view lock.  Holding this lock will prevent the processing
-//   * of new views, and will prevent other threads from being able to access
-//   * the view
-//   */
-//  public static Object getViewLock(DistributedSystem sys) {
-//    return getMembershipManager(sys).latestViewLock;
-//  }
-  
-  /** returns true if the given member is shunned */
-  public static boolean isShunned(DistributedSystem sys, DistributedMember mbr) {
-    return getMembershipManager(sys).isShunned(mbr);
-  }
-  
-  /** returns true if the given member is a surprise member */
-  public static boolean isSurpriseMember(DistributedSystem sys, DistributedMember mbr) {
-    return getMembershipManager(sys).isSurpriseMember(mbr);
-  }
-  
-  /**
-   * add a member id to the surprise members set, with the given millisecond
-   * clock birth time
-   */
-  public static void addSurpriseMember(DistributedSystem sys,
-      DistributedMember mbr, long birthTime) {
-    getMembershipManager(sys).addSurpriseMemberForTesting(mbr, birthTime);
-  }
-
-  /**
-   * inhibits/enables logging of forced-disconnect messages.
-   * For quorum-lost messages this adds expected-exception annotations
-   * before and after the messages to make them invisible to greplogs
-   */
-  public static void inhibitForcedDisconnectLogging(boolean b) {
-    JGroupMembershipManager.inhibitForcedDisconnectLogging(b);
-  }
-  
-  /**
-   * wait for a member to leave the view.  Throws an assertionerror
-   * if the timeout period elapses before the member leaves
-   */
-  public static void waitForMemberDeparture(final DistributedSystem sys, 
-      final DistributedMember member, final long timeout) {
-    WaitCriterion ev = new WaitCriterion() {
-      public boolean done() {
-        return !getMembershipManager(sys).getView().contains(member);
-      }
-      public String description() {
-        String assMsg = "Waited over " + timeout + " ms for " + member 
-            + " to depart, but it didn't";
-        return assMsg;
-      }
-    };
-    DistributedTestCase.waitForCriterion(ev, timeout, 200, true);
-  }
-  
-  public static void crashDistributedSystem(final DistributedSystem msys) {
-    MembershipManagerHelper.inhibitForcedDisconnectLogging(true);
-    MembershipManagerHelper.playDead(msys);
-    JChannel c = MembershipManagerHelper.getJChannel(msys);
-    Protocol udp = c.getProtocolStack().findProtocol("UDP");
-    udp.stop();
-    udp.passUp(new Event(Event.EXIT, new RuntimeException("killing members ds")));
-    try {
-      MembershipManagerHelper.getJChannel(msys).waitForClose();
-    }
-    catch (InterruptedException ie) {
-      Thread.currentThread().interrupt();
-      // attempt rest of work with interrupt bit set
-    }
-//    LogWriter bLogger =
-//      new LocalLogWriter(LogWriterImpl.ALL_LEVEL, System.out);
-    MembershipManagerHelper.inhibitForcedDisconnectLogging(false);
-  }
-    
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/d2a942e8/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/tcpserver/LocatorVersioningJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/tcpserver/LocatorVersioningJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/tcpserver/LocatorVersioningJUnitTest.java
deleted file mode 100644
index 7c5b84e..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/tcpserver/LocatorVersioningJUnitTest.java
+++ /dev/null
@@ -1,205 +0,0 @@
-/**
- * 
- */
-package com.gemstone.gemfire.distributed.internal.tcpserver;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
-
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.FileReader;
-import java.io.IOException;
-import java.net.InetAddress;
-import java.util.Map;
-import java.util.Properties;
-import java.util.Vector;
-
-import org.junit.Assert;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import com.gemstone.gemfire.distributed.Locator;
-import com.gemstone.gemfire.distributed.internal.DistributionConfig;
-import com.gemstone.gemfire.distributed.internal.membership.jgroup.LocatorImpl;
-import com.gemstone.gemfire.internal.AvailablePort;
-import com.gemstone.gemfire.internal.Version;
-import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
-import com.gemstone.org.jgroups.stack.GossipClient;
-import com.gemstone.org.jgroups.stack.IpAddress;
-
-import dunit.DistributedTestCase;
-import dunit.DistributedTestCase.WaitCriterion;
-
-/**
- * @author shobhit
- *
- */
-@Category(IntegrationTest.class)
-public class LocatorVersioningJUnitTest {
-
-  
-  @Test
-  public void testLocatorStateFileBackwardCompatibility() throws IOException, InterruptedException {
-  
-    Locator locator = null;
-    int port = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
-    File tmpFile = new File("locator-" + port + ".log");
-
-    int currFileVersion = LocatorImpl.FILE_FORMAT_VERSION;
-    Map fileVersionMap = LocatorImpl.getFileVersionMapForTestOnly();
-    assertEquals(2, fileVersionMap.size());
-
-    // Create old FILE_FORMAT_VERSION and map it with old Version.ordinal.
-    int newGossipVersion = currFileVersion-2;
-    try {
-      LocatorImpl.FILE_FORMAT_VERSION = newGossipVersion;
-      fileVersionMap.put(newGossipVersion, Integer.valueOf(Version.GFE_71.ordinal()));
-      TcpServer.isTesting = true;
-  
-      final Properties props = new Properties();
-      props.setProperty("mcast-port", "0");
-//      props.setProperty("log-level", "fine");
-      props.setProperty(DistributionConfig.ENABLE_CLUSTER_CONFIGURATION_NAME, "false");
-      locator = Locator.startLocatorAndDS(port, tmpFile, props);
-      Assert.assertEquals(locator, Locator.getLocators().iterator().next());
-      Thread.sleep(1000);
-      
-      final GossipClient client = new GossipClient(new IpAddress(InetAddress.getLocalHost(), port),  500);
-      client.register("mygroup1", new IpAddress(InetAddress.getLocalHost(), 55), 5000, false);
-  
-      WaitCriterion ev = new WaitCriterion() {
-        public boolean done() {
-          try {
-            Vector members = client.getMembers("mygroup1", 
-                new IpAddress(InetAddress.getLocalHost(), 55), true, 5000);
-            System.out.println("received response of " + members + " ("+members.size()+" entries)");
-            return members.size() == 1;
-          }
-          catch (Exception e) {
-            e.printStackTrace();
-            fail("unexpected exception");
-          }
-          return false; // NOTREACHED
-        }
-        public String description() {
-          return null;
-        }
-      };
-      
-      DistributedTestCase.waitForCriterion(ev, 1000, 200, true);
-      Vector members = client.getMembers("mygroup1", new IpAddress(InetAddress.getLocalHost(), 55), true,5000);
-      Assert.assertEquals(1, members.size());
-      Assert.assertEquals(new IpAddress(InetAddress.getLocalHost(), 55), members.get(0));
-  
-  
-      // Stop and restart locator
-      locator.stop();
-      locator = Locator.startLocatorAndDS(port, tmpFile, props);
-      
-      // Now restart should recover fine from old version state file.
-      final GossipClient client2 = new GossipClient(new IpAddress(InetAddress.getLocalHost(), port),  500);
-      members = client2.getMembers("mygroup1", new IpAddress(InetAddress.getLocalHost(), 55), true,5000);
-      Assert.assertEquals(1, members.size());
-      Assert.assertEquals(new IpAddress(InetAddress.getLocalHost(), 55), members.get(0));
-    } finally {
-      fileVersionMap.remove(newGossipVersion);
-      LocatorImpl.FILE_FORMAT_VERSION = currFileVersion;
-      TcpServer.isTesting = false;
-      if (locator != null) {
-        locator.stop();
-      }
-    }
-  }
-
-  @Test
-  public void testLocatorStateFileBackwardCompatibilityWithGF701() throws IOException, InterruptedException {
-
-    int port = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
-    File tmpFile = new File("locator-" + port + ".log");
-    
-    int currFileVersion = LocatorImpl.FILE_FORMAT_VERSION;
-    Map fileVersionMap = LocatorImpl.getFileVersionMapForTestOnly();
-    assertEquals(2, fileVersionMap.size());
-    
-    // Create old FILE_FORMAT_VERSION and map it with old Version.ordinal.
-    LocatorImpl.FILE_FORMAT_VERSION = currFileVersion-1;
-    TcpServer.isTesting = true;
-
-    final Properties props = new Properties();
-    props.setProperty("mcast-port", "0");
-//    props.setProperty("log-level", "fine");
-    props.setProperty(DistributionConfig.ENABLE_CLUSTER_CONFIGURATION_NAME, "false");
-    Locator loc = Locator.getLocator().startLocatorAndDS(port, tmpFile, props);
-    Assert.assertEquals(loc, Locator.getLocators().iterator().next());
-    Thread.sleep(1000);
-    
-    final GossipClient client = new GossipClient(new IpAddress(InetAddress.getLocalHost(), port),  500);
-    client.register("mygroup1", new IpAddress(InetAddress.getLocalHost(), 55), 5000, false);
-
-    WaitCriterion ev = new WaitCriterion() {
-      public boolean done() {
-        try {
-          Vector members = client.getMembers("mygroup1", 
-              new IpAddress(InetAddress.getLocalHost(), 55), true, 5000);
-          return members.size() == 1;
-        }
-        catch (Exception e) {
-          e.printStackTrace();
-          fail("unexpected exception");
-        }
-        return false; // NOTREACHED
-      }
-      public String description() {
-        return null;
-      }
-    };
-    
-    DistributedTestCase.waitForCriterion(ev, 1000, 200, true);
-    Vector members = client.getMembers("mygroup1", new IpAddress(InetAddress.getLocalHost(), 55), true,5000);
-    Assert.assertEquals(1, members.size());
-    Assert.assertEquals(new IpAddress(InetAddress.getLocalHost(), 55), members.get(0));
-
-
-    // Stop and restart locator
-    loc.stop();
-    loc = Locator.startLocatorAndDS(port, tmpFile, props);
-
-    // Change LocatorImpl back to latest File format version.
-    LocatorImpl.FILE_FORMAT_VERSION = currFileVersion;
-
-    // Now restart should recover fine from old version state file.
-    final GossipClient client2 = new GossipClient(new IpAddress(InetAddress.getLocalHost(), port),  500);
-    members = client2.getMembers("mygroup1", new IpAddress(InetAddress.getLocalHost(), 55), true, 5000);
-    Assert.assertEquals(1, members.size());
-    Assert.assertEquals(new IpAddress(InetAddress.getLocalHost(), 55), members.get(0));
-    TcpServer.isTesting = false;
-    loc.stop();
-
-    // Check if log file has any IOException. As IOException doesn't affect main
-    // locator thread.
-    FileReader fr = new FileReader(tmpFile);
-    BufferedReader br = new BufferedReader(fr);
-    
-    try {
-      
-      String line = br.readLine();
-      boolean found = false;
-      while (line != null) {
-        if (line.contains("IOException")) {
-          found = true;
-        }
-        line = br.readLine();
-      }
-      
-      if (found) {
-        fail("IOException is thrown in locator, most probably because of not being able to read" +
-        		" previoous state from locator state file. Locator log file is: "
-            + tmpFile);
-      }
-    } finally {
-      br.close();
-      fr.close();
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/d2a942e8/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/tcpserver/TcpServerBackwardCompatDUnitDisabledTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/tcpserver/TcpServerBackwardCompatDUnitDisabledTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/tcpserver/TcpServerBackwardCompatDUnitDisabledTest.java
index d1caa79..a3a586b 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/tcpserver/TcpServerBackwardCompatDUnitDisabledTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/tcpserver/TcpServerBackwardCompatDUnitDisabledTest.java
@@ -19,9 +19,6 @@ import com.gemstone.gemfire.distributed.internal.InternalDistributedSystem;
 import com.gemstone.gemfire.distributed.internal.tcpserver.TcpServer;
 import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.internal.Version;
-import com.gemstone.org.jgroups.stack.GossipClient;
-import com.gemstone.org.jgroups.stack.GossipServer;
-import com.gemstone.org.jgroups.stack.IpAddress;
 
 import dunit.DistributedTestCase;
 import dunit.Host;
@@ -140,21 +137,24 @@ public class TcpServerBackwardCompatDUnitDisabledTest extends DistributedTestCas
           TcpServer.OLDTESTVERSION -= 100;
           TcpServer.getGossipVersionMapForTestOnly().put(TcpServer.TESTVERSION, Version.CURRENT_ORDINAL);
           TcpServer.getGossipVersionMapForTestOnly().put(TcpServer.OLDTESTVERSION, Version.GFE_57.ordinal());
-          assertEquals("Gossip Version and Test version are not same", GossipServer.GOSSIPVERSION, TcpServer.TESTVERSION);
-          assertEquals("Previous Gossip Version and Test version are not same", GossipServer.OLDGOSSIPVERSION, TcpServer.OLDTESTVERSION);
+          assertEquals("Gossip Version and Test version are not same", TcpServer.GOSSIPVERSION, TcpServer.TESTVERSION);
+          assertEquals("Previous Gossip Version and Test version are not same", TcpServer.OLDGOSSIPVERSION, TcpServer.OLDTESTVERSION);
 
           Locator.startLocatorAndDS(port1, logFile1, props);
 
           // Start a gossip client to connect to first locator "locator0".
-          final GossipClient client = new GossipClient(new IpAddress(InetAddress.getLocalHost(), port1),  500);
-          client.register("mygroup1", new IpAddress(InetAddress.getLocalHost(), port1), 5000, false);
+          fail("this test must be fixed to work with the jgroups replacement");
+          // TODO
+//          final GossipClient client = new GossipClient(new IpAddress(InetAddress.getLocalHost(), port1),  500);
+//          client.register("mygroup1", new IpAddress(InetAddress.getLocalHost(), port1), 5000, false);
 
           WaitCriterion ev = new WaitCriterion() {
             public boolean done() {
               try {
-                Vector members = client.getMembers("mygroup1", 
-                    new IpAddress(InetAddress.getLocalHost(), port0), true, 5000);
-                return members.size() == 2;
+                // TODO
+//                Vector members = client.getMembers("mygroup1", 
+//                    new IpAddress(InetAddress.getLocalHost(), port0), true, 5000);
+//                return members.size() == 2;
               }
               catch (Exception e) {
                 e.printStackTrace();
@@ -168,10 +168,12 @@ public class TcpServerBackwardCompatDUnitDisabledTest extends DistributedTestCas
           };
           
           DistributedTestCase.waitForCriterion(ev, 1000, 200, true);
-          Vector members = client.getMembers("mygroup1", new IpAddress(InetAddress.getLocalHost(), port0), true, 5000);
-          Assert.assertEquals(2, members.size());
-          Assert.assertTrue(members.contains(new IpAddress(InetAddress.getLocalHost(), port0)));
-          Assert.assertTrue(members.contains(new IpAddress(InetAddress.getLocalHost(), port1)));
+          fail("this test must be fixed to work with the jgroups replacement");
+          // TODO
+//          Vector members = client.getMembers("mygroup1", new IpAddress(InetAddress.getLocalHost(), port0), true, 5000);
+//          Assert.assertEquals(2, members.size());
+//          Assert.assertTrue(members.contains(new IpAddress(InetAddress.getLocalHost(), port0)));
+//          Assert.assertTrue(members.contains(new IpAddress(InetAddress.getLocalHost(), port1)));
 
         } catch (IOException e) {
           fail("Locator1 start failed with Gossip Version: " + TcpServer.GOSSIPVERSION + "!", e);
@@ -200,28 +202,31 @@ public class TcpServerBackwardCompatDUnitDisabledTest extends DistributedTestCas
           TcpServer.OLDTESTVERSION -= 100;
           TcpServer.getGossipVersionMapForTestOnly().put(TcpServer.TESTVERSION, Version.CURRENT_ORDINAL);
           TcpServer.getGossipVersionMapForTestOnly().put(TcpServer.OLDTESTVERSION, Version.GFE_57.ordinal());
-          assertEquals("Gossip Version and Test version are not same", GossipServer.GOSSIPVERSION, TcpServer.TESTVERSION);
-          assertEquals("Previous Gossip Version and Test version are not same", GossipServer.OLDGOSSIPVERSION, TcpServer.OLDTESTVERSION);
+          assertEquals("Gossip Version and Test version are not same", TcpServer.GOSSIPVERSION, TcpServer.TESTVERSION);
+          assertEquals("Previous Gossip Version and Test version are not same", TcpServer.OLDGOSSIPVERSION, TcpServer.OLDTESTVERSION);
 
           Locator.startLocatorAndDS(port0, logFile0, props);
 
           // A new gossip client with new GOSSIPVERSION must be able
           // to connect with new locator on port1, remote locator.
           // Reuse locator0 VM.
-          final GossipClient client2 = new GossipClient(new IpAddress(InetAddress.getLocalHost(), port1),  500);
-          Vector<IpAddress> members = client2.getMembers("mygroup1", new IpAddress(InetAddress.getLocalHost(), port1), true, 5000);
-          Assert.assertEquals(2, members.size());
+          fail("this test must be fixed to work with the jgroups replacement");
+          // TODO
+//          final GossipClient client2 = new GossipClient(new IpAddress(InetAddress.getLocalHost(), port1),  500);
+//          Vector<IpAddress> members = client2.getMembers("mygroup1", new IpAddress(InetAddress.getLocalHost(), port1), true, 5000);
+//          Assert.assertEquals(2, members.size());
           // As they are coming from other locator, their pid is of other locator process.
-          getLogWriter().info(members.get(0) + " " + members.get(1));
-
-          for (IpAddress ipAddr : members) {
-            int port = ipAddr.getPort();
-            String hostname = ipAddr.getIpAddress().getHostAddress();
-            int pid = ipAddr.getProcessId();
-            Assert.assertTrue(" " + ipAddr, port == port0 || port == port1);
-            Assert.assertTrue(" " + ipAddr, hostname.equals(InetAddress.getLocalHost().getHostAddress()));
-            Assert.assertTrue(" " + ipAddr, pid == locator1.getPid());
-          }
+//          getLogWriter().info(members.get(0) + " " + members.get(1));
+
+          // TODO
+//          for (IpAddress ipAddr : members) {
+//            int port = ipAddr.getPort();
+//            String hostname = ipAddr.getIpAddress().getHostAddress();
+//            int pid = ipAddr.getProcessId();
+//            Assert.assertTrue(" " + ipAddr, port == port0 || port == port1);
+//            Assert.assertTrue(" " + ipAddr, hostname.equals(InetAddress.getLocalHost().getHostAddress()));
+//            Assert.assertTrue(" " + ipAddr, pid == locator1.getPid());
+//          }
 
         } catch (IOException e) {
           fail("Locator0 start failed with Gossip Version: " + TcpServer.GOSSIPVERSION + "!", e);

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/d2a942e8/gemfire-core/src/test/java/com/gemstone/gemfire/internal/BackwardCompatibilitySerializationJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/BackwardCompatibilitySerializationJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/BackwardCompatibilitySerializationJUnitTest.java
index 62bd5d5..63d8550 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/BackwardCompatibilitySerializationJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/BackwardCompatibilitySerializationJUnitTest.java
@@ -31,7 +31,6 @@ import com.gemstone.gemfire.cache.Cache;
 import com.gemstone.gemfire.cache.CacheFactory;
 import com.gemstone.gemfire.internal.cache.DistributedPutAllOperation.EntryVersionsList;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
-import com.gemstone.org.jgroups.util.VersionedStreamable;
 
 /**
  * Test the DSFID serialization framework added for rolling upgrades in 7.1
@@ -212,16 +211,7 @@ public class BackwardCompatibilitySerializationJUnitTest {
     Version[] versions = null;
     if (ds instanceof SerializationVersions) {
       versions = ((SerializationVersions)ds).getSerializationVersions();
-    } else {
-      short[] ordinals = ((VersionedStreamable)ds).getSerializationVersions();
-      if (ordinals != null && ordinals.length > 0) {
-        versions = new Version[ordinals.length];
-        for (int i=ordinals.length; i>=0; i--) {
-          versions[i] = Version.fromOrdinalNoThrow(ordinals[i], false);
-        }
-      }
     }
-
     if (versions != null && versions.length > 0) {
       for (int i = 0; i < versions.length; i++) {
         try {
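
[Editor's note: with the VersionedStreamable fallback removed, this test now only exercises classes that expose their compatibility versions through the SerializationVersions interface cast above. A minimal sketch of that pattern, assuming a DSFID whose wire format changed in 7.1; the surrounding class context is illustrative, not part of this commit:

  import com.gemstone.gemfire.internal.Version;

  // advertise the versions that need a pre-7.1 serialization path
  public Version[] getSerializationVersions() {
    return new Version[] { Version.GFE_71 };
  }
]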

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/d2a942e8/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ConcurrentMapOpsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ConcurrentMapOpsDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ConcurrentMapOpsDUnitTest.java
index 72db3f1..2a9bc9f 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ConcurrentMapOpsDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ConcurrentMapOpsDUnitTest.java
@@ -37,7 +37,7 @@ import com.gemstone.gemfire.cache.util.CacheListenerAdapter;
 import com.gemstone.gemfire.cache30.CacheTestCase;
 import com.gemstone.gemfire.distributed.DistributedMember;
 import com.gemstone.gemfire.distributed.internal.membership.MembershipManager;
-import com.gemstone.gemfire.distributed.internal.membership.jgroup.MembershipManagerHelper;
+import com.gemstone.gemfire.distributed.internal.membership.gms.MembershipManagerHelper;
 import com.gemstone.gemfire.internal.AvailablePort;
 
 import dunit.DistributedTestCase;

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/d2a942e8/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ConnectDisconnectDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ConnectDisconnectDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ConnectDisconnectDUnitTest.java
index 7092dce..675fcbe 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ConnectDisconnectDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ConnectDisconnectDUnitTest.java
@@ -7,7 +7,6 @@ import java.util.Properties;
 import com.gemstone.gemfire.cache.TimeoutException;
 import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
 import com.gemstone.gemfire.cache30.CacheTestCase;
-import com.gemstone.gemfire.distributed.internal.membership.jgroup.JGroupMembershipManager;
 import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.internal.AvailablePortHelper;
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/d2a942e8/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/PartitionedRegionLoadModelJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/PartitionedRegionLoadModelJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/PartitionedRegionLoadModelJUnitTest.java
index d1ea492..75a8ddd 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/PartitionedRegionLoadModelJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/PartitionedRegionLoadModelJUnitTest.java
@@ -1229,7 +1229,7 @@ public class PartitionedRegionLoadModelJUnitTest {
     return new AddressComparor() {
       public boolean areSameZone(InternalDistributedMember member1,
           InternalDistributedMember member2) {
-        return member1.getIpAddress().equals(member2.getIpAddress());
+        return member1.getInetAddress().equals(member2.getInetAddress());
       }
 
       public boolean enforceUniqueZones() {


