hbase-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From mbau...@apache.org
Subject svn commit: r1369645 [3/3] - in /hbase/branches/0.89-fb: ./ src/main/java/org/apache/hadoop/hbase/ src/main/java/org/apache/hadoop/hbase/client/ src/main/java/org/apache/hadoop/hbase/executor/ src/main/java/org/apache/hadoop/hbase/master/ src/main/java...
Date Sun, 05 Aug 2012 19:16:13 GMT
Added: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/zookeeper/LegacyRootZNodeUpdater.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/zookeeper/LegacyRootZNodeUpdater.java?rev=1369645&view=auto
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/zookeeper/LegacyRootZNodeUpdater.java (added)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/zookeeper/LegacyRootZNodeUpdater.java Sun Aug  5 19:16:11 2012
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hbase.zookeeper;
+
+import org.apache.hadoop.hbase.HServerInfo;
+import org.apache.hadoop.hbase.StopStatus;
+import org.apache.hadoop.hbase.util.HasThread;
+
+/**
+ * A background thread that wakes up frequently and writes the legacy root region location znode
+ * (host:port only, no server start code).
+ */
+public class LegacyRootZNodeUpdater extends HasThread {
+
+  private static final int WAIT_MS = 1000;
+
+  private ZooKeeperWrapper zkw;
+  private StopStatus stopped;
+  private Object waitOn;
+
+  public LegacyRootZNodeUpdater(ZooKeeperWrapper zkw, StopStatus stopped,
+      Object waitOn) {
+    this.zkw = zkw;
+    this.waitOn = waitOn;
+    this.stopped = stopped;
+  }
+
+  @Override
+  public void run() {
+    Thread.currentThread().setName(LegacyRootZNodeUpdater.class.getName());
+    HServerInfo prevRootLocation = null;
+    boolean firstUpdate = true;
+    while (!stopped.isStopped()) {
+      HServerInfo rootLocation = zkw.readRootRegionServerInfo();
+      if (firstUpdate ||
+          (prevRootLocation != rootLocation &&  // check that they are not both null
+           (rootLocation == null ||  // this means prevRootLocation != null, so they are different
+            !rootLocation.equals(prevRootLocation)))) {
+        zkw.writeLegacyRootRegionLocation(rootLocation);
+        prevRootLocation = rootLocation;
+      }
+      try {
+        synchronized (waitOn) {
+          waitOn.wait(WAIT_MS);
+        }
+      } catch (InterruptedException ex) {
+        // Ignore. We will only stop if the master is shutting down.
+      }
+      firstUpdate = false;
+    }
+  }
+
+}

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java?rev=1369645&r1=1369644&r2=1369645&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java Sun Aug  5 19:16:11 2012
@@ -276,7 +276,7 @@ public class RecoverableZooKeeper {
     while (true) {
       try {
         byte[] revData = zk.getData(path, watcher, stat);
-        return this.removeMetaData(revData);
+        return removeMetaData(revData);
       } catch (KeeperException e) {
         switch (e.code()) {
           case CONNECTIONLOSS:
@@ -466,7 +466,7 @@ public class RecoverableZooKeeper {
               throw e;
             }
             LOG.error("Node " + path + " already exists and this is not a " +
-			"retry");
+            		"retry");
             throw e;
 
           case CONNECTIONLOSS:

Added: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePathAndData.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePathAndData.java?rev=1369645&view=auto
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePathAndData.java (added)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePathAndData.java Sun Aug  5 19:16:11 2012
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hbase.zookeeper;
+
+import java.util.Arrays;
+
+import org.apache.hadoop.hbase.util.Bytes;
+
+public class ZNodePathAndData {
+
+  private String path;
+  private byte[] data;
+
+  public ZNodePathAndData(String path, byte[] data) {
+    this.path = path;
+    this.data = data;
+  }
+
+  @Override
+  public int hashCode() {
+    return path.hashCode() ^ Arrays.hashCode(data);
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (obj == null)
+      return false;
+    ZNodePathAndData that = (ZNodePathAndData) obj;
+    return that.path.equals(path) && Bytes.equals(that.data, data);
+  }
+
+  public String getzNodePath() {
+    return path;
+  }
+
+  public byte[] getData() {
+    return data;
+  }
+
+}

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWrapper.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWrapper.java?rev=1369645&r1=1369644&r2=1369645&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWrapper.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWrapper.java Sun Aug  5 19:16:11 2012
@@ -56,15 +56,15 @@ import org.apache.hadoop.hbase.util.Byte
 import org.apache.hadoop.hbase.util.RuntimeExceptionAbortStrategy;
 import org.apache.zookeeper.AsyncCallback;
 import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.KeeperException;
 import org.apache.zookeeper.KeeperException.NoNodeException;
+import org.apache.zookeeper.KeeperException;
 import org.apache.zookeeper.WatchedEvent;
-import org.apache.zookeeper.Watcher;
 import org.apache.zookeeper.Watcher.Event.EventType;
 import org.apache.zookeeper.Watcher.Event.KeeperState;
+import org.apache.zookeeper.Watcher;
 import org.apache.zookeeper.ZooDefs.Ids;
-import org.apache.zookeeper.ZooKeeper;
 import org.apache.zookeeper.ZooKeeper.States;
+import org.apache.zookeeper.ZooKeeper;
 import org.apache.zookeeper.data.Stat;
 
 /**
@@ -119,42 +119,37 @@ public class ZooKeeperWrapper implements
   private final int sessionTimeout;
   private RecoverableZooKeeper recoverableZK;
 
-  /*
-   * All the HBase directories are hosted under this parent
-   */
+  /** All the HBase directories are hosted under this parent */
   public final String parentZNode;
-  /*
-   * Specifies the RS hosting root
-   */
+
+  /** Specifies the RS hosting root (host, port, start code) */
   private final String rootRegionZNode;
-  /*
+  
+  /** A znode containing root regionserver host:port only for compatibility with old clients */ 
+  private final String legacyRootRegionZNode;
+
+  /**
    * This is the directory where the RS's create ephemeral nodes. The master
    * watches these nodes, and their expiry indicates RS death.
    */
   private final String rsZNode;
-  /*
-   * ZNode used for election of the primary master when there are secondaries.
-   */
+
+  /** ZNode used for election of the primary master when there are secondaries. */
   public final String masterElectionZNode;
-  /*
-   * State of the cluster - if up and running or shutting down
-   */
+
+  /** State of the cluster - if up and running or shutting down */
   public final String clusterStateZNode;
-  /*
-   * Regions that are in transition
-   */
+
+  /** Regions that are in transition */
   private final String rgnsInTransitZNode;
-  /*
-   * ZNode used for log splitting work assignment
-   */
+
+  /** ZNode used for log splitting work assignment */
   public final String splitLogZNode;
-  /*
-   * ZNode used for table-level schema modification locks
-   */
+
+  /** ZNode used for table-level schema modification locks */
   public final String tableLockZNode;
-  /*
-   * List of ZNodes in the unassgined region that are already being watched
-   */
+
+  /** List of ZNodes in the unassigned region that are already being watched */
   private Set<String> unassignedZNodesWatched = new HashSet<String>();
 
   private List<Watcher> listeners = Collections.synchronizedList(new ArrayList<Watcher>());
@@ -262,7 +257,10 @@ public class ZooKeeperWrapper implements
     parentZNode = conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT,
         HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
 
-    String rootServerZNodeName = conf.get("zookeeper.znode.rootserver", "root-region-server");
+    String rootServerZNodeName =
+        conf.get("zookeeper.znode.rootserver.complete", "root-region-server-complete");
+    String legacyRootServerZNodeName = 
+        conf.get("zookeeper.znode.rootserver", "root-region-server");
     String rsZNodeName         = conf.get("zookeeper.znode.rs", "rs");
     String masterAddressZNodeName = conf.get("zookeeper.znode.master", "master");
     String stateZNodeName      = conf.get("zookeeper.znode.state", "shutdown");
@@ -270,6 +268,7 @@ public class ZooKeeperWrapper implements
     String splitLogZNodeName   = conf.get("zookeeper.znode.splitlog", "splitlog");
     String tableLockZNodeName  = conf.get("zookeeper.znode.tableLock", "tableLock");
     rootRegionZNode     = getZNode(parentZNode, rootServerZNodeName);
+    legacyRootRegionZNode = getZNode(parentZNode, legacyRootServerZNodeName);
     rsZNode             = getZNode(parentZNode, rsZNodeName);
     rgnsInTransitZNode  = getZNode(parentZNode, regionsInTransitZNodeName);
     masterElectionZNode = getZNode(parentZNode, masterAddressZNodeName);
@@ -574,6 +573,13 @@ public class ZooKeeperWrapper implements
    *         there was a problem reading the ZNode.
    */
   public HServerAddress readRootRegionLocation() {
+    return HServerInfo.getAddress(readAddress(rootRegionZNode, null));
+  }
+
+  /**
+   * @return the location of the server serving the root region, including the start code
+   */
+  public HServerInfo readRootRegionServerInfo() {
     return readAddress(rootRegionZNode, null);
   }
 
@@ -584,7 +590,7 @@ public class ZooKeeperWrapper implements
    *         ZNode. The watcher is set only if the result is not null.
    */
   public HServerAddress readMasterAddress(Watcher watcher) {
-    return readAddress(masterElectionZNode, watcher);
+    return HServerInfo.getAddress(readAddress(masterElectionZNode, watcher));
   }
 
   /**
@@ -667,7 +673,7 @@ public class ZooKeeperWrapper implements
     return checkExistenceOf(masterElectionZNode);
   }
 
-  public HServerAddress readAddress(String znode, Watcher watcher) {
+  public HServerInfo readAddress(String znode, Watcher watcher) {
     try {
       LOG.debug("<" + instanceName + ">" + "Trying to read " + znode);
       return readAddressOrThrow(znode, watcher);
@@ -685,7 +691,7 @@ public class ZooKeeperWrapper implements
    * @return
    * @throws KeeperException
    */
-  public HServerAddress readAddressOrThrow(String znode, Watcher watcher)
+  public HServerInfo readAddressOrThrow(String znode, Watcher watcher)
   throws KeeperException {
     byte[] data;
     try {
@@ -699,7 +705,11 @@ public class ZooKeeperWrapper implements
 
     String addressString = Bytes.toString(data);
     LOG.debug("<" + instanceName + ">" + "Read ZNode " + znode + " got " + addressString);
-    return new HServerAddress(addressString);
+    if (HServerInfo.isValidServerName(addressString)) {
+      return HServerInfo.fromServerName(addressString);
+    } else {
+      return new HServerInfo(new HServerAddress(addressString));
+    }
   }
 
   /**
@@ -743,20 +753,20 @@ public class ZooKeeperWrapper implements
    * Delete ZNode containing root region location.
    * @return true if operation succeeded, false otherwise.
    */
-  public boolean deleteRootRegionLocation()  {
-    if (!ensureParentExists(rootRegionZNode)) {
+  private boolean deleteRootRegionLocation(String znode)  {
+    if (!ensureParentExists(znode)) {
       return false;
     }
 
     try {
-      deleteZNode(rootRegionZNode);
+      deleteZNode(znode);
       return true;
     } catch (KeeperException.NoNodeException e) {
       return true;    // ok, move on.
     } catch (KeeperException e) {
-      LOG.warn("<" + instanceName + ">" + "Failed to delete " + rootRegionZNode + ": " + e);
+      LOG.warn("<" + instanceName + ">" + "Failed to delete " + znode + ": " + e);
     } catch (InterruptedException e) {
-      LOG.warn("<" + instanceName + ">" + "Failed to delete " + rootRegionZNode + ": " + e);
+      LOG.warn("<" + instanceName + ">" + "Failed to delete " + znode + ": " + e);
     }
 
     return false;
@@ -774,7 +784,7 @@ public class ZooKeeperWrapper implements
   }
 
   /**
-   * Optionnally recursive deletion of specified znode
+   * Optionally recursive deletion of specified znode
    * @param znode
    * @param recursive
    * @throws KeeperException
@@ -811,12 +821,12 @@ public class ZooKeeperWrapper implements
     LOG.debug("<" + instanceName + ">" + "Deleted ZNode " + znode);
   }
 
-  private boolean createRootRegionLocation(String address) {
+  private boolean createRootRegionLocation(String znode, String address) {
     byte[] data = Bytes.toBytes(address);
     try {
-      recoverableZK.create(rootRegionZNode, data, Ids.OPEN_ACL_UNSAFE,
+      recoverableZK.create(znode, data, Ids.OPEN_ACL_UNSAFE,
                        CreateMode.PERSISTENT);
-      LOG.debug("<" + instanceName + ">" + "Created ZNode " + rootRegionZNode + " with data " + address);
+      LOG.debug("<" + instanceName + ">" + "Created ZNode " + znode + " with data " + address);
       return true;
     } catch (KeeperException e) {
       LOG.warn("<" + instanceName + ">" + "Failed to create root region in ZooKeeper: " + e);
@@ -827,11 +837,11 @@ public class ZooKeeperWrapper implements
     return false;
   }
 
-  private boolean updateRootRegionLocation(String address) {
+  private boolean updateRootRegionLocation(String znode, String address) {
     byte[] data = Bytes.toBytes(address);
     try {
-      recoverableZK.setData(rootRegionZNode, data, -1);
-      LOG.debug("<" + instanceName + ">" + "SetData of ZNode " + rootRegionZNode + " with " + address);
+      recoverableZK.setData(znode, data, -1);
+      LOG.debug("<" + instanceName + ">" + "SetData of ZNode " + znode + " with " + address);
       return true;
     } catch (KeeperException e) {
       LOG.warn("<" + instanceName + ">" + "Failed to set root region location in ZooKeeper: " + e);
@@ -845,25 +855,35 @@ public class ZooKeeperWrapper implements
   /**
    * Write root region location to ZooKeeper. If address is null, delete ZNode.
    * containing root region location.
-   * @param address HServerAddress to write to ZK.
+   * @param hsi server info (host/port/start code)
    * @return true if operation succeeded, false otherwise.
    */
-  public boolean writeRootRegionLocation(HServerAddress address) {
-    if (address == null) {
-      return deleteRootRegionLocation();
+  public boolean writeRootRegionLocation(HServerInfo hsi) {
+    return writeRootRegionLocation(hsi, false);
+  }
+
+  boolean writeLegacyRootRegionLocation(HServerInfo hsi) {
+    return writeRootRegionLocation(hsi, true);
+  }
+
+  private boolean writeRootRegionLocation(HServerInfo hsi, boolean isLegacyZNode) {
+    String znode = isLegacyZNode ? legacyRootRegionZNode : rootRegionZNode;
+    if (hsi == null) {
+      return deleteRootRegionLocation(znode);
     }
 
-    if (!ensureParentExists(rootRegionZNode)) {
+    if (!ensureParentExists(znode)) {
       return false;
     }
 
-    String addressString = address.toString();
+    String addressString = isLegacyZNode ? hsi.getServerAddress().toString() :
+        hsi.getServerName();
 
-    if (checkExistenceOf(rootRegionZNode)) {
-      return updateRootRegionLocation(addressString);
+    if (checkExistenceOf(znode)) {
+      return updateRootRegionLocation(znode, addressString);
     }
 
-    return createRootRegionLocation(addressString);
+    return createRootRegionLocation(znode, addressString);
   }
 
   /**
@@ -937,7 +957,8 @@ public class ZooKeeperWrapper implements
   }
 
   /**
-   * Set a watch on a region server location node
+   * Set a watch on a region server location node.
+   * @throws IOException if could not set a watch
    */
   public void setRSLocationWatch(HServerInfo info, Watcher watcher) throws IOException {
     String znode = getRSZNode(info);
@@ -1053,7 +1074,7 @@ public class ZooKeeperWrapper implements
     }
     for (String node : nodes) {
       String path = joinPath(znode, node);
-      list.add(readAddress(path, watcher));
+      list.add(readAddress(path, watcher).getServerAddress());
     }
     return list;
   }
@@ -1111,10 +1132,7 @@ public class ZooKeeperWrapper implements
     byte[] data = null;
     try {
       String path = getZNode(parentZNode, znode);
-      // TODO: ZK-REFACTOR: remove existence check?
-      if (checkExistenceOf(path)) {
-        data = recoverableZK.getData(path, watcher, stat);
-      }
+      data = recoverableZK.getData(path, watcher, stat);
     } catch (KeeperException e) {
       LOG.warn("<" + instanceName + ">" + "Failed to read " + znode
           + " znode in ZooKeeper: " + e);
@@ -1338,6 +1356,19 @@ public class ZooKeeperWrapper implements
     return readDataFromFullyQualifiedZNode(fullyQualifiedZNodeName, stat);
   }
 
+  public byte[] readUnassignedZNodeAndSetWatch(String znodeName) throws IOException {
+    String fullyQualifiedZNodeName = getZNode(parentZNode, znodeName);
+    synchronized (unassignedZNodesWatched) {
+      unassignedZNodesWatched.add(znodeName);
+      try {
+        return readDataFromFullyQualifiedZNode(fullyQualifiedZNodeName, null);
+      } catch (IOException ex) {
+        unassignedZNodesWatched.remove(znodeName);
+        throw ex;
+      }
+    }
+  }
+
   public byte[] readDataFromFullyQualifiedZNode(
       String fullyQualifiedZNodeName, Stat stat) throws IOException {
     byte[] data;
@@ -1483,7 +1514,9 @@ public class ZooKeeperWrapper implements
       try {
         writeZNode(znode, data, -1, true);
       } catch (IOException e) {
-        LOG.error("Error writing data for " + znode + ", could not update state to " + (HBaseEventType.fromByte(data[0])));
+        unassignedZNodesWatched.remove(znode);
+        LOG.error("Error writing data for " + znode + ", could not update state to "
+            + (HBaseEventType.fromByte(data[0])));
         abort("Error writing data for " + znode, e);
       }
     }
@@ -1859,24 +1892,6 @@ public class ZooKeeperWrapper implements
     throw ke;
   }
 
-  public static class ZNodePathAndData {
-    private String zNodePath;
-    private byte[] data;
-
-    public ZNodePathAndData(String zNodePath, byte[] data) {
-      this.zNodePath = zNodePath;
-      this.data = data;
-    }
-
-    public String getzNodePath() {
-      return zNodePath;
-    }
-    public byte[] getData() {
-      return data;
-    }
-
-  }
-
   /**
    * Blocks until there are no node in regions in transition. Used in testing
    * only.

Modified: hbase/branches/0.89-fb/src/main/resources/hbase-default.xml
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/resources/hbase-default.xml?rev=1369645&r1=1369644&r2=1369645&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/resources/hbase-default.xml (original)
+++ hbase/branches/0.89-fb/src/main/resources/hbase-default.xml Sun Aug  5 19:16:11 2012
@@ -427,7 +427,7 @@
       <description>
           This allows to put non-root multi-level index blocks into the block
           cache at the time the index is being written.
-      </description>
+      </description> 
   </property>
   <property>
       <name>hfile.index.block.max.size</name>
@@ -469,7 +469,7 @@
       <value>false</value>
       <description>
           Whether an HFile block should be added to the block cache when the
-          block is finished.
+          block is finished. 
       </description>
   </property>
   <property>
@@ -530,6 +530,16 @@
       the master and read by clients and region servers. If a relative path is
       given, the parent folder will be ${zookeeper.znode.parent}. By default,
       this means the root location is stored at /hbase/root-region-server.
+      This is the deprecated znode that stores host:port only. It is still
+      populated continuously by the master.
+    </description>
+  </property>
+  <property>
+    <name>zookeeper.znode.rootserver.complete</name>
+    <value>root-region-server-complete</value>
+    <description>
      This is the new root region server location znode. It stores the
      complete server name: host, port, and start code.
     </description>
   </property>
 

Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java?rev=1369645&r1=1369644&r2=1369645&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java Sun Aug  5 19:16:11 2012
@@ -44,7 +44,6 @@ import java.util.NavigableSet;
 import java.util.Random;
 import java.util.Set;
 import java.util.UUID;
-import java.util.concurrent.ConcurrentSkipListSet;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -78,6 +77,7 @@ import org.apache.hadoop.hbase.regionser
 import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
 import org.apache.hadoop.hbase.util.RegionSplitter;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.util.Writables;
@@ -120,7 +120,7 @@ public class HBaseTestingUtility {
 
   /** The root directory for all mini-cluster test data for this testing utility instance. */
   private File clusterTestBuildDir = null;
-
+  
   /** If there is a mini cluster running for this testing utility instance. */
   private boolean miniClusterRunning;
 
@@ -137,7 +137,7 @@ public class HBaseTestingUtility {
    * Default parent directory for test output.
    */
   public static final String DEFAULT_TEST_DIRECTORY = "target/build/data";
-
+  
   /** Filesystem URI used for map-reduce mini-cluster setup */
   private static String fsURI;
 
@@ -279,7 +279,7 @@ public class HBaseTestingUtility {
   /**
    * Shuts down instance created by call to {@link #startMiniDFSCluster(int)}
    * or does nothing.
-   * @throws IOException
+   * @throws IOException 
    * @throws Exception
    */
   public void shutdownMiniDFSCluster() throws IOException {
@@ -409,9 +409,13 @@ public class HBaseTestingUtility {
 
     // Don't leave here till we've done a successful scan of the .META.
     HTable t = null;
-    for (int i = 0; i < 10; ++i) {
+    for (int i = 0; i < 10; ++i) { 
       try {
         t = new HTable(this.conf, HConstants.META_TABLE_NAME);
+        for (Result result : t.getScanner(new Scan())) {
+          LOG.debug("Successfully read meta entry: " + result);
+        }
+        break;
       } catch (NoServerForRegionException ex) {
         LOG.error("META is not online, sleeping");
         Threads.sleepWithoutInterrupt(2000);
@@ -447,12 +451,7 @@ public class HBaseTestingUtility {
    */
   public void shutdownMiniCluster() throws IOException {
     LOG.info("Shutting down minicluster");
-    if (this.hbaseCluster != null) {
-      this.hbaseCluster.shutdown();
-      // Wait till hbase is down before going on to shutdown zk.
-      this.hbaseCluster.join();
-      hbaseCluster = null;
-    }
+    shutdownMiniHBaseCluster();
     shutdownMiniZKCluster();
     shutdownMiniDFSCluster();
 
@@ -888,7 +887,7 @@ public class HBaseTestingUtility {
     LOG.info("Starting mini mapreduce cluster...");
     // These are needed for the new and improved Map/Reduce framework
     Configuration c = getConfiguration();
-
+    
     setupClusterTestBuildDir();
     createDirsAndSetProperties();
 
@@ -1412,7 +1411,7 @@ REGION_LOOP:
         + new Random().nextInt(MAX_RANDOM_PORT - MIN_RANDOM_PORT);
   }
 
-  /**
+  /** 
    * Returns a random free port and marks that port as taken. Not thread-safe. Expected to be
    * called from single-threaded test setup code/
    */
@@ -1539,15 +1538,35 @@ REGION_LOOP:
     }
   }
 
-  public void setFileSystemURI(String fsURI) {
-    this.fsURI = fsURI;
+  public static void setFileSystemURI(String fsURI) {
+    HBaseTestingUtility.fsURI = fsURI;
+  }
+
+  private static void logMethodEntryAndSetThreadName(String methodName) {
+    LOG.info("\nStarting " + methodName + "\n");
+    Thread.currentThread().setName(methodName);
   }
 
   /**
    * Sets the current thread name to the caller's method name. 
    */
   public static void setThreadNameFromMethod() {
-    String methodName = new Throwable().getStackTrace()[1].getMethodName();
-    Thread.currentThread().setName(methodName);
+    logMethodEntryAndSetThreadName(new Throwable().getStackTrace()[1].getMethodName());
+  }
+
+  /**
+   * Sets the current thread name to the caller's caller's method name. 
+   */
+  public static void setThreadNameFromCallerMethod() {
+    logMethodEntryAndSetThreadName(new Throwable().getStackTrace()[2].getMethodName());
+  }
+
+  public void killMiniHBaseCluster() {
+    for (RegionServerThread rst : hbaseCluster.getRegionServerThreads()) {
+      rst.getRegionServer().kill();
+    }
+    for (HMaster master : hbaseCluster.getMasters()) {
+      master.stop("killMiniHBaseCluster");
+    }
   }
 }

Added: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/TestHServerInfo.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/TestHServerInfo.java?rev=1369645&view=auto
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/TestHServerInfo.java (added)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/TestHServerInfo.java Sun Aug  5 19:16:11 2012
@@ -0,0 +1,128 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotSame;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hbase.util.Writables;
+import org.junit.Test;
+
+public class TestHServerInfo {
+
+  @Test
+  public void testHashCodeAndEquals() {
+    HServerAddress hsa1 = new HServerAddress("localhost", 1234);
+    HServerInfo hsi1 = new HServerInfo(hsa1, 1L);
+    HServerInfo hsi2 = new HServerInfo(hsa1, 1L);
+    HServerInfo hsi3 = new HServerInfo(hsa1, 2L);
+    HServerInfo hsi4 = new HServerInfo(hsa1, 1L);
+    HServerAddress hsa2 = new HServerAddress("localhost", 1235);
+    HServerInfo hsi5 = new HServerInfo(hsa2, 1L);
+    assertEquals(hsi1.hashCode(), hsi2.hashCode());
+    assertTrue(hsi1.equals(hsi2));
+    assertFalse(hsi1.hashCode() == hsi3.hashCode());
+    assertFalse(hsi1.equals(hsi3));
+    assertEquals(hsi1.hashCode(), hsi4.hashCode());
+    assertTrue(hsi1.equals(hsi4));
+    assertFalse(hsi1.hashCode() == hsi5.hashCode());
+    assertFalse(hsi1.equals(hsi5));
+  }
+
+  @Test
+  public void testHServerInfoHServerInfo() {
+    HServerAddress hsa1 = new HServerAddress("localhost", 1234);
+    HServerInfo hsi1 = new HServerInfo(hsa1, 1L);
+    HServerInfo hsi2 = new HServerInfo(hsi1);
+    assertEquals(hsi1, hsi2);
+  }
+
+  @Test
+  public void testGetServerAddress() {
+    HServerAddress hsa1 = new HServerAddress("localhost", 1234);
+    HServerInfo hsi1 = new HServerInfo(hsa1, 1L);
+    assertEquals(hsi1.getServerAddress(), hsa1);
+  }
+
+  @Test
+  public void testToString() {
+    HServerAddress hsa1 = new HServerAddress("localhost", 1234);
+    HServerInfo hsi1 = new HServerInfo(hsa1, 1L);
+    System.out.println(hsi1.toString());
+  }
+
+  @Test
+  public void testReadFields() throws IOException {
+    HServerAddress hsa1 = new HServerAddress("localhost", 1234);
+    HServerInfo hsi1 = new HServerInfo(hsa1, 1L);
+    HServerAddress hsa2 = new HServerAddress("localhost", 1235);
+    HServerInfo hsi2 = new HServerInfo(hsa2, 1L);
+    byte [] bytes = Writables.getBytes(hsi1);
+    HServerInfo deserialized =
+      (HServerInfo)Writables.getWritable(bytes, new HServerInfo());
+    assertEquals(hsi1, deserialized);
+    bytes = Writables.getBytes(hsi2);
+    deserialized = (HServerInfo)Writables.getWritable(bytes, new HServerInfo());
+    assertNotSame(hsa1, deserialized);
+  }
+
+  @Test
+  public void testCompareTo() {
+    HServerAddress hsa1 = new HServerAddress("localhost", 1234);
+    HServerInfo hsi1 = new HServerInfo(hsa1, 1L);
+    HServerAddress hsa2 = new HServerAddress("localhost", 1235);
+    HServerInfo hsi2 = new HServerInfo(hsa2, 1L);
+    assertTrue(hsi1.compareTo(hsi1) == 0);
+    assertTrue(hsi2.compareTo(hsi2) == 0);
+    int compare1 = hsi1.compareTo(hsi2);
+    int compare2 = hsi2.compareTo(hsi1);
+    assertTrue((compare1 > 0)? compare2 < 0: compare2 > 0);
+  }
+
+  @Test
+  public void testFromServerName() {
+    String host = "127.0.0.1";
+    int port = 60020;
+    long startCode = 1343258056696L;
+    String serverName = host + HServerInfo.SERVERNAME_SEPARATOR + port +
+        HServerInfo.SERVERNAME_SEPARATOR + startCode;
+    HServerInfo hsi = HServerInfo.fromServerName(serverName);
+    assertEquals(host, hsi.getHostname());
+    assertEquals(port, hsi.getServerAddress().getPort());
+    assertEquals(startCode, hsi.getStartCode());
+    assertTrue(HServerInfo.isValidServerName(serverName));
+    assertEquals(serverName, hsi.getServerName());
+  }
+
+  @Test
+  public void testIsValidServerName() {
+    assertTrue(HServerInfo.isValidServerName("foo.bar,60020," + Long.MAX_VALUE));
+    assertTrue(HServerInfo.isValidServerName("127.0.0.1,60020," + Long.MAX_VALUE));
+    assertTrue(HServerInfo.isValidServerName("www.acme.com,80," + Long.MIN_VALUE));
+    assertFalse(HServerInfo.isValidServerName(",www.acme.com,80,0"));
+    assertTrue(HServerInfo.isValidServerName("foo.bar,60020," + Long.MAX_VALUE));
+    assertFalse(HServerInfo.isValidServerName("foo.bar,60020," + Long.MAX_VALUE + "a"));
+  }
+
+}
+
+

Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/io/encoding/TestUpgradeFromHFileV1ToEncoding.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/io/encoding/TestUpgradeFromHFileV1ToEncoding.java?rev=1369645&r1=1369644&r2=1369645&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/io/encoding/TestUpgradeFromHFileV1ToEncoding.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/io/encoding/TestUpgradeFromHFileV1ToEncoding.java Sun Aug  5 19:16:11 2012
@@ -34,7 +34,6 @@ import org.apache.hadoop.hbase.util.Byte
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
-import org.junit.experimental.categories.Category;
 
 public class TestUpgradeFromHFileV1ToEncoding {
 

Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduce.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduce.java?rev=1369645&r1=1369644&r2=1369645&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduce.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduce.java Sun Aug  5 19:16:11 2012
@@ -120,7 +120,7 @@ public class TestTableMapReduce extends 
   private void runTestOnTable(HTable table)
   throws IOException, InterruptedException, ClassNotFoundException {
     HBaseTestingUtility testUtil = new HBaseTestingUtility();
-    testUtil.setFileSystemURI(fs.getUri().toString());
+    HBaseTestingUtility.setFileSystemURI(fs.getUri().toString());
     MiniMRCluster mrCluster = testUtil.startMiniMapReduceCluster();
     LOG.debug("ZK client port before runing MR job: " +
         ZooKeeperWrapper.getZKClientPort(conf));
@@ -261,7 +261,7 @@ public class TestTableMapReduce extends 
     assertFalse(tmpjars.contains("guava"));
 
     System.err.println("appending guava jar");
-    TableMapReduceUtil.addDependencyJars(job.getConfiguration(),
+    TableMapReduceUtil.addDependencyJars(job.getConfiguration(), 
         com.google.common.base.Function.class);
     tmpjars = job.getConfiguration().get("tmpjars");
     assertTrue(tmpjars.contains("guava"));

Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/master/MultiMasterTest.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/master/MultiMasterTest.java?rev=1369645&r1=1369644&r2=1369645&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/master/MultiMasterTest.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/master/MultiMasterTest.java Sun Aug  5 19:16:11 2012
@@ -42,24 +42,24 @@ import org.junit.After;
 
 /**
  * A base class for unit tests that require multiple masters, e.g. master
- * failover tests.
+ * failover tests. 
  */
 public class MultiMasterTest {
   private static final Log LOG = LogFactory.getLog(MultiMasterTest.class);
   private MiniHBaseCluster cluster;
 
-  protected final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
-  protected final Configuration conf = TEST_UTIL.getConfiguration();
+  protected final HBaseTestingUtility testUtil = new HBaseTestingUtility();
+  protected final Configuration conf = testUtil.getConfiguration();
 
   public void startMiniCluster(int numMasters, int numRS) throws IOException,
       InterruptedException {
-    cluster = TEST_UTIL.startMiniCluster(numMasters, numRS);
+    cluster = testUtil.startMiniCluster(numMasters, numRS);
   }
 
   @After
   public void tearDown() throws IOException {
     header("Starting cluster shutdown");
-    TEST_UTIL.shutdownMiniCluster();
+    testUtil.shutdownMiniCluster();
     assertTrue(
         "Some ZK wrapper instances in the namespace have not been closed."
             + " See error logs above.",

Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/master/TestClusterStartupDetection.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/master/TestClusterStartupDetection.java?rev=1369645&r1=1369644&r2=1369645&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/master/TestClusterStartupDetection.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/master/TestClusterStartupDetection.java Sun Aug  5 19:16:11 2012
@@ -1,3 +1,19 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
 package org.apache.hadoop.hbase.master;
 
 import static org.junit.Assert.assertEquals;
@@ -16,7 +32,7 @@ public class TestClusterStartupDetection
    * Testing the master's ability to distinguish between a fresh cluster
    * startup and master failover. This is a simple restart of one master.
    */
-  @Test(timeout=240000)
+  @Test(timeout=60000)
   public void testStartupDetectionSimple()
       throws IOException, InterruptedException, KeeperException {
     ZooKeeperWrapper.setNamespaceForTesting();
@@ -47,7 +63,7 @@ public class TestClusterStartupDetection
     waitUntilRegionServersCheckIn(numRS - 1);
   }
 
-  @Test(timeout=240000)
+  @Test(timeout=60000)
   public void testStartupDetectionOnMasterDelay() throws IOException,
       InterruptedException {
     ZooKeeperWrapper.setNamespaceForTesting();
@@ -71,7 +87,7 @@ public class TestClusterStartupDetection
     waitUntilRegionServersCheckIn(numRS);
   }
 
-  @Test(timeout=240000)
+  @Test(timeout=60000)
   public void testStartupDetectionOnMasterFailover() throws IOException,
       InterruptedException {
     ZooKeeperWrapper.setNamespaceForTesting();
@@ -82,11 +98,6 @@ public class TestClusterStartupDetection
     startMiniCluster(numMasters, numRS);
     ensureMastersAreUp(numMasters);
 
-    // Initially all masters should think that this is a fresh cluster startup.
-    for (HMaster master : miniCluster().getMasters()) {
-      assertTrue(master.isClusterStartup());
-    }
-
     String oldActiveName;
     {
       final int activeIndex = getActiveMasterIndex();

Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/master/TestLogSplitOnMasterFailover.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/master/TestLogSplitOnMasterFailover.java?rev=1369645&r1=1369644&r2=1369645&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/master/TestLogSplitOnMasterFailover.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/master/TestLogSplitOnMasterFailover.java Sun Aug  5 19:16:11 2012
@@ -258,7 +258,7 @@ public class TestLogSplitOnMasterFailove
     List<HMaster> masters = miniCluster().getMasters();
 
     header("Starting data loader");
-    DataLoader dataLoader = new DataLoader(conf, TEST_UTIL);
+    DataLoader dataLoader = new DataLoader(conf, testUtil);
     Thread inserterThread = new Thread(dataLoader);
     inserterThread.start();
     dataLoader.waitUntilHalfRowsLoaded();

Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java?rev=1369645&r1=1369644&r2=1369645&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java Sun Aug  5 19:16:11 2012
@@ -76,3 +76,4 @@ public class TestMasterFailover extends 
   }
 
 }
+ 

Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/master/TestRSLivenessOnMasterFailover.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/master/TestRSLivenessOnMasterFailover.java?rev=1369645&r1=1369644&r2=1369645&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/master/TestRSLivenessOnMasterFailover.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/master/TestRSLivenessOnMasterFailover.java Sun Aug  5 19:16:11 2012
@@ -38,7 +38,7 @@ public class TestRSLivenessOnMasterFailo
       InterruptedException, KeeperException {
     // Use low RPC timeout because the regionserver will try to talk to a
     // master that is not there.
-    TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, 3000);
+    testUtil.getConfiguration().setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, 3000);
     startMiniCluster(1, 1);
 
     HServerAddress killedMasterAddress = killActiveMasterAndWaitToStop();
@@ -48,7 +48,7 @@ public class TestRSLivenessOnMasterFailo
     // not reload the master address from ZK, it will get stuck in an infinite
     // loop, which is the bug this unit test is trying to catch.
     ZooKeeperWrapper zkw = ZooKeeperWrapper.createInstance(
-        TEST_UTIL.getConfiguration(), "spoofMasterAddress");
+        testUtil.getConfiguration(), "spoofMasterAddress");
     assertNotNull(zkw);
     zkw.writeMasterAddress(killedMasterAddress);
     miniCluster().startRegionServerNoWait();

Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/master/TestRegionStateOnMasterFailure.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/master/TestRegionStateOnMasterFailure.java?rev=1369645&r1=1369644&r2=1369645&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/master/TestRegionStateOnMasterFailure.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/master/TestRegionStateOnMasterFailure.java Sun Aug  5 19:16:11 2012
@@ -23,6 +23,7 @@ import static org.junit.Assert.assertTru
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 import java.util.Random;
@@ -33,6 +34,7 @@ import java.util.regex.Pattern;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.client.Delete;
@@ -44,11 +46,14 @@ import org.apache.hadoop.hbase.client.Sc
 import org.apache.hadoop.hbase.executor.HBaseEventHandler;
 import org.apache.hadoop.hbase.executor.HBaseEventHandler.HBaseEventHandlerListener;
 import org.apache.hadoop.hbase.executor.HBaseEventHandler.HBaseEventType;
+import org.apache.hadoop.hbase.master.handler.MasterCloseRegionHandler;
 import org.apache.hadoop.hbase.master.handler.MasterOpenRegionHandler;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
 import org.apache.zookeeper.KeeperException;
+import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -73,52 +78,139 @@ public class TestRegionStateOnMasterFail
   private static final int NUM_MASTERS = 2;
   private static final int NUM_RS = 3;
 
-  private Pattern META_AND_ROOT_RE = Pattern.compile(
-      (Bytes.toStringBinary(HConstants.META_TABLE_NAME) + "|" +
+  private static final int TEST_TIMEOUT_MS = 90 * 1000;
+
+  private static final Pattern META_AND_ROOT_RE = Pattern.compile(
+      (Bytes.toStringBinary(HConstants.META_TABLE_NAME) + "|" + 
       Bytes.toStringBinary(HConstants.ROOT_TABLE_NAME)).replace(".", "\\."));
 
+  private List<HBaseEventHandlerListener> toUnregister =
+      new ArrayList<HBaseEventHandlerListener>();
+
+  @After
+  public void tearDown() throws IOException {
+    for (HBaseEventHandlerListener listener : toUnregister) {
+      HBaseEventHandler.unregisterListener(listener);
+    }
+    toUnregister.clear();
+    super.tearDown();
+  }
+
   private interface WayToCloseRegion {
     void closeRegion(HRegion region) throws IOException;
   }
 
+  private class CloseRegionThroughAdmin implements WayToCloseRegion { 
+    @Override
+    public void closeRegion(HRegion region) throws IOException {
+      header("Closing region " + region.getRegionNameAsString());
+      testUtil.closeRegion(region.getRegionName());
+    }
+  };
+
+  private class KillRegionServerWithRegion implements WayToCloseRegion {
+    public void closeRegion(HRegion region) throws IOException {
+      header("Aborting the region server with the region " +
+          region.getRegionNameAsString());
+      region.getRegionServer().abort("Killing region server holding " +
+          "region " + region);
+    }
+  }
+  
+  /** Kills -ROOT- and .META. regionservers */
+  private class KillRootAndMetaRS implements WayToCloseRegion {
+    public void closeRegion(HRegion ignored) throws IOException {
+      // Copy the list of region server threads because it will be modified as we kill
+      // -ROOT-/.META. regionservers.
+      for (RegionServerThread rst : 
+           new ArrayList<RegionServerThread>(miniCluster().getRegionServerThreads())) {
+        HRegionServer rs = rst.getRegionServer();
+        for (HRegionInfo hri : rs.getRegionsAssignment()) {
+          if (hri.isRootRegion() || hri.isMetaRegion()) {
+            rs.abort("Killing region server holding region " + hri.getRegionNameAsString());
+            break;
+          }
+        }
+      }
+    }
+  }
+
   @Before
   public void setUp() throws IOException, InterruptedException {
+    ServerManager.clearRSBlacklistInTest();
     startMiniCluster(NUM_MASTERS, NUM_RS);
     fillTable();
     shortSleep();
   }
 
-  @Test(timeout=180000)
-  public void testKillingRSAndMaster() throws IOException,
+  @Test(timeout=TEST_TIMEOUT_MS)
+  public void testCloseUserRegionKillMasterOnClosed() throws IOException,
       InterruptedException, KeeperException {
-    header("Starting the test to kill RS and master");
-    closeRegionAndKillMaster(new WayToCloseRegion() {
-      @Override
-      public void closeRegion(HRegion region) throws IOException {
-        header("Aborting the region server with the region " +
-            region.getRegionNameAsString());
-        region.getRegionServer().abort("Killing region server holding " +
-            "region " + region);
-      }
-    });
+    closeRegionAndKillMaster(TABLE_NAME, new CloseRegionThroughAdmin(),
+        HBaseEventType.RS2ZK_REGION_CLOSED);
   }
 
-  @Test(timeout=180000)
-  public void testClosingRegionAndKillingMaster() throws IOException,
+  @Test(timeout=TEST_TIMEOUT_MS)
+  public void testCloseUserRegionKillMasterOnOpened() throws IOException,
       InterruptedException, KeeperException {
-    header("Starting the test to close a region and kill master");
-    closeRegionAndKillMaster(new WayToCloseRegion() {
-      @Override
-      public void closeRegion(HRegion region) throws IOException {
-        header("Closing region " + region.getRegionNameAsString());
-        TEST_UTIL.closeRegion(region.getRegionName());
-      }
-    });
+    closeRegionAndKillMaster(TABLE_NAME, new CloseRegionThroughAdmin(),
+        HBaseEventType.RS2ZK_REGION_OPENED);
+  }
+
+  @Test(timeout=TEST_TIMEOUT_MS)
+  public void testCloseRootKillMasterOnClosed() throws Exception {
+    closeRegionAndKillMaster(HConstants.ROOT_TABLE_NAME, new CloseRegionThroughAdmin(),
+        HBaseEventType.RS2ZK_REGION_CLOSED);
   }
 
-  public void closeRegionAndKillMaster(WayToCloseRegion howToClose)
+  @Test(timeout=TEST_TIMEOUT_MS)
+  public void testCloseRootKillMasterOnOpened() throws Exception {
+    closeRegionAndKillMaster(HConstants.ROOT_TABLE_NAME, new CloseRegionThroughAdmin(),
+        HBaseEventType.RS2ZK_REGION_OPENED);
+  }
+
+  @Test(timeout=TEST_TIMEOUT_MS)
+  public void testCloseMetaKillMasterOnClosed() throws Exception {
+    closeRegionAndKillMaster(HConstants.META_TABLE_NAME, new CloseRegionThroughAdmin(),
+        HBaseEventType.RS2ZK_REGION_CLOSED);
+  }
+
+  @Test(timeout=TEST_TIMEOUT_MS)
+  public void testCloseMetaKillMasterOnOpened() throws Exception {
+    closeRegionAndKillMaster(HConstants.META_TABLE_NAME, new CloseRegionThroughAdmin(),
+        HBaseEventType.RS2ZK_REGION_OPENED);
+  }
+
+  @Test(timeout=TEST_TIMEOUT_MS)
+  public void testKillRSWithUserRegion() throws IOException,
+      InterruptedException, KeeperException {
+    closeRegionAndKillMaster(TABLE_NAME, new KillRegionServerWithRegion(),
+        HBaseEventType.RS2ZK_REGION_OPENED);
+  }
+
+  @Test(timeout=TEST_TIMEOUT_MS)
+  public void testKillRootRS() throws Exception {
+    closeRegionAndKillMaster(HConstants.ROOT_TABLE_NAME, new KillRegionServerWithRegion(),
+        HBaseEventType.RS2ZK_REGION_OPENED);
+  }
+
+  @Test(timeout=TEST_TIMEOUT_MS)
+  public void testKillMetaRS() throws Exception {
+    closeRegionAndKillMaster(HConstants.META_TABLE_NAME, new KillRegionServerWithRegion(),
+        HBaseEventType.RS2ZK_REGION_OPENED);
+  }
+
+  @Test(timeout=TEST_TIMEOUT_MS)
+  public void testKillRootMetaRS() throws Exception {
+    closeRegionAndKillMaster(HConstants.META_TABLE_NAME, new KillRootAndMetaRS(),
+        HBaseEventType.RS2ZK_REGION_OPENED);
+  }
+  
+  public void closeRegionAndKillMaster(byte[] tableName,
+      WayToCloseRegion howToClose, HBaseEventType eventToWatch)
       throws IOException, InterruptedException, KeeperException {
-    final List<HRegion> regions = miniCluster().getRegions(TABLE_NAME);
+    HBaseTestingUtility.setThreadNameFromCallerMethod();
+    final List<HRegion> regions = miniCluster().getRegions(tableName);
     assertEquals(1, regions.size());
     final HRegion region = regions.get(0);
     final String originalRS =
@@ -132,15 +224,16 @@ public class TestRegionStateOnMasterFail
 
     final String targetRegionName = region.getRegionNameAsString();
     MasterKillerListener listener = new MasterKillerListener(
-        targetRegionName);
+        targetRegionName, eventToWatch);
 
     HBaseEventHandler.registerListener(listener);
+    toUnregister.add(listener);
     howToClose.closeRegion(region);
 
     shortSleep();
 
     header("Waiting until all regions are assigned");
-    TEST_UTIL.waitUntilAllRegionsAssigned(1);
+    testUtil.waitUntilAllRegionsAssigned(1);
 
     if (listener.failed()) {
       fail("Fatal error in the event listener -- please check the logs.");
@@ -205,7 +298,7 @@ public class TestRegionStateOnMasterFail
 
   private Map<String, String> getAssignmentsFromMeta() throws IOException {
     header("Resulting region assignments in .META.:");
-    final HTable metaTable = new HTable(TEST_UTIL.getConfiguration(),
+    final HTable metaTable = new HTable(testUtil.getConfiguration(),
         HConstants.META_TABLE_NAME);
     final Scan scan = new Scan().addFamily(HConstants.CATALOG_FAMILY);
     final ResultScanner scanner = metaTable.getScanner(scan);
@@ -279,17 +372,19 @@ public class TestRegionStateOnMasterFail
   }
 
   /**
-   * A listener that kills the master before it can process the "region opened"
-   * event.
+   * A listener that kills the master before it can process the "region opened" or "region closed"
+   * events.
    */
-  private class MasterKillerListener
-      implements HBaseEventHandlerListener {
+  private class MasterKillerListener implements HBaseEventHandlerListener {
 
     private final String targetRegionName;
+    private HBaseEventType eventToWatch;
+
     private volatile boolean failed;
 
-    public MasterKillerListener(String targetRegionName) {
+    public MasterKillerListener(String targetRegionName, HBaseEventType eventToWatch) {
       this.targetRegionName = targetRegionName;
+      this.eventToWatch = eventToWatch;
     }
 
     public boolean failed() {
@@ -310,22 +405,27 @@ public class TestRegionStateOnMasterFail
       LOG.info(REGION_EVENT_MSG + "Event: " + eventType + ", handler: " +
           event.getClass().getSimpleName());
 
-      if (eventType != HBaseEventType.RS2ZK_REGION_OPENED ||
-          !(event instanceof MasterOpenRegionHandler)) {
+      if (eventType != eventToWatch || 
+         !(event instanceof MasterOpenRegionHandler ||
+           event instanceof MasterCloseRegionHandler)) {
+        LOG.info(REGION_EVENT_MSG + "Unrecognized event type/class: " + eventType + ", " +
+           event.getClass().getSimpleName() + ", ignoring");
         return;
       }
 
-      final MasterOpenRegionHandler regionEvent =
-          (MasterOpenRegionHandler) event;
+      final HBaseEventHandler regionEvent = (HBaseEventHandler) event;
 
       boolean terminateEventThread = false;
       try {
-        final String openedRegion = regionEvent.getRegionName();
-        logMsg("Opened region: " + openedRegion + ", region server name: "
-            + regionEvent.getRegionServerName() + ", target region: "
-            + targetRegionName);
-
-        if (targetRegionName.endsWith("." + openedRegion + ".")) {
+        final String regionName = regionEvent.getRegionName();
+        logMsg("Event: " + eventType + ", region: " + regionName + ", region server name: "
+            + regionEvent.getRegionServerName() + ", target region: " + targetRegionName);
+
+        // E.g. user table: regionName=01f5858c7919232822dd2525b7748aaf,
+        //     targetRegionName=TestRegionState,,1343862006169.01f5858c7919232822dd2525b7748aaf.
+        // Meta table: regionName=1028785192, targetRegionName=.META.,,1.1028785192
+        if (targetRegionName.endsWith("." + regionName + ".") ||  // user table
+            targetRegionName.endsWith("." + regionName)) {        // meta table
           // Blacklist the new regionserver from being assigned any
           // regions when the new master comes up. Then the master will
           // have to assign to the third regionserver.
@@ -336,12 +436,12 @@ public class TestRegionStateOnMasterFail
           assertHostPort(newRSHostPort);
           ServerManager.blacklistRSHostPortInTest(newRSHostPort);
           logMsg("Killing master right before it can process the event "
-              + eventType + " for region " + openedRegion);
+              + eventType + " for region " + regionName);
           HBaseEventHandler.unregisterListener(this);
           miniCluster().killActiveMaster();
           terminateEventThread = true;
         } else {
-          logMsg("Skipping event for region " + openedRegion
+          logMsg("Skipping event for region " + regionName
               + " (does not match " + targetRegionName + ")");
         }
       } catch (Throwable t) {
@@ -366,7 +466,7 @@ public class TestRegionStateOnMasterFail
    */
   private void fillTable() throws IOException, InterruptedException {
     Random rand = new Random(19387129L);
-    HTable table = TEST_UTIL.createTable(TABLE_NAME, FAMILIES);
+    HTable table = testUtil.createTable(TABLE_NAME, FAMILIES);
     for (int iStoreFile = 0; iStoreFile < 4; ++iStoreFile) {
       for (int iRow = 0; iRow < 100; ++iRow) {
         final byte[] row = Bytes.toBytes("row" + iRow);
@@ -392,7 +492,7 @@ public class TestRegionStateOnMasterFail
         table.flushCommits();
       }
     }
-    TEST_UTIL.waitUntilAllRegionsAssigned(1);
+    testUtil.waitUntilAllRegionsAssigned(1);
   }
 
 }

Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java?rev=1369645&r1=1369644&r2=1369645&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java Sun Aug  5 19:16:11 2012
@@ -51,7 +51,7 @@ public class TestHRegionInfo {
                  + id + "." + md5HashInHex + ".",
                  nameStr);
   }
-
+  
   @Test
   public void testContainsRange() {
     HTableDescriptor tableDesc = new HTableDescriptor("testtable");
@@ -71,7 +71,7 @@ public class TestHRegionInfo {
     assertFalse(hri.containsRange(Bytes.toBytes("g"), Bytes.toBytes("g")));
     // Single row range entirely outside
     assertFalse(hri.containsRange(Bytes.toBytes("z"), Bytes.toBytes("z")));
-
+    
     // Degenerate range
     try {
       hri.containsRange(Bytes.toBytes("z"), Bytes.toBytes("a"));
@@ -79,4 +79,9 @@ public class TestHRegionInfo {
     } catch (IllegalArgumentException iae) {
     }
   }
+
+  @Test
+  public void testRootRegionName() {
+    assertEquals("70236052", HRegionInfo.ROOT_REGION_ENCODED_NAME_STR);
+  }
 }

Added: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/TestDrainableQueue.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/TestDrainableQueue.java?rev=1369645&view=auto
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/TestDrainableQueue.java (added)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/TestDrainableQueue.java Sun Aug  5 19:16:11 2012
@@ -0,0 +1,129 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hbase.util;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Random;
+import java.util.concurrent.Callable;
+import java.util.concurrent.CompletionService;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorCompletionService;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.junit.Test;
+
+public class TestDrainableQueue implements ParamCallable<Integer> {
+
+  private static final Log LOG = LogFactory.getLog(TestDrainableQueue.class);
+
+  private static final int NUM_ATTEMPTS = 10;
+  private static final int NUM_PRODUCERS = 20;
+  private static final int NUM_EVENTS_PER_BATCH = 1000;
+
+  private CountDownLatch shouldDrain;
+  private DrainableQueue<Integer> q;
+  private int eventsProcessed;
+  private AtomicInteger numEnqueued = new AtomicInteger();
+
+  private class Producer implements Callable<Void> {
+
+    private final int index;
+
+    private Producer(int index) {
+      this.index = index;
+    }
+
+    @Override
+    public Void call() throws Exception {
+      LOG.info("Starting producer " + index);
+      try {
+        Random rand = new Random(982735927437L + index);
+        for (int i = 0; i < NUM_EVENTS_PER_BATCH; ++i) {
+          if (q.enqueue(rand.nextInt(100000))) {
+            numEnqueued.incrementAndGet();
+          }
+          if (rand.nextBoolean()) {
+            Threads.sleep(rand.nextInt(3));
+          }
+          shouldDrain.countDown();
+        }
+      } finally {
+        LOG.info("Finishing producer " + index);
+      }
+      return null;
+    }
+  }
+
+  @Override
+  public void call(Integer x) {
+    if (x % (NUM_PRODUCERS * 10) == 0) {
+      Threads.sleep(x % 3);
+    }
+    ++eventsProcessed;
+  }
+
+  @Test(timeout = 30 * 1000)
+  public void testDrainableQueue() throws Exception {
+    for (int attempt = 0; attempt < NUM_ATTEMPTS; ++attempt) {
+      final int totalEvents = NUM_PRODUCERS * NUM_EVENTS_PER_BATCH;
+      final int drainAfterNEvents = totalEvents / 2;
+      shouldDrain = new CountDownLatch(drainAfterNEvents);
+      numEnqueued.set(0);
+      q = new DrainableQueue<Integer>("queue");
+      ExecutorService exec = Executors.newFixedThreadPool(NUM_PRODUCERS);
+      CompletionService<Void> cs = new ExecutorCompletionService<Void>(exec);
+      List<Future<Void>> futures = new ArrayList<Future<Void>>();
+      for (int producer = 0; producer < NUM_PRODUCERS; ++producer) {
+        futures.add(cs.submit(new Producer(producer)));
+      }
+      shouldDrain.await();
+      eventsProcessed = 0;
+      LOG.info("Starting draining the queue");
+      q.drain(this);
+      LOG.info("Finished draining the queue");
+      assertEquals(numEnqueued.get(), eventsProcessed);
+      LOG.info("Events processed: " + eventsProcessed + ", drainAfterNEvents: "
+          + drainAfterNEvents);
+      assertTrue(eventsProcessed >= drainAfterNEvents);
+      for (Future<Void> f : futures) {
+        try {
+          f.get();
+        } catch (ExecutionException ex) {
+          LOG.error("Exception from producer thread", ex);
+          if (ex.getCause() instanceof AssertionError) {
+            throw (AssertionError) ex.getCause();
+          }
+          throw ex;
+        }
+      }
+      exec.shutdown();
+      assertTrue(exec.awaitTermination(5, TimeUnit.SECONDS));
+    }
+  }
+
+}



Mime
View raw message