hbase-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From st...@apache.org
Subject svn commit: r1097275 [7/8] - in /hbase/trunk: ./ src/docbkx/ src/main/java/org/apache/hadoop/hbase/ src/main/java/org/apache/hadoop/hbase/avro/ src/main/java/org/apache/hadoop/hbase/catalog/ src/main/java/org/apache/hadoop/hbase/client/ src/main/java/o...
Date Wed, 27 Apr 2011 23:12:44 GMT
Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/zookeeper/RootRegionTracker.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/zookeeper/RootRegionTracker.java?rev=1097275&r1=1097274&r2=1097275&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/zookeeper/RootRegionTracker.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/zookeeper/RootRegionTracker.java Wed Apr 27 23:12:42 2011
@@ -20,9 +20,8 @@
 package org.apache.hadoop.hbase.zookeeper;
 
 import org.apache.hadoop.hbase.Abortable;
-import org.apache.hadoop.hbase.HServerAddress;
+import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.catalog.RootLocationEditor;
-import org.apache.hadoop.hbase.regionserver.RegionServerServices;
 import org.apache.hadoop.hbase.util.Bytes;
 
 /**
@@ -54,31 +53,34 @@ public class RootRegionTracker extends Z
 
   /**
    * Gets the root region location, if available.  Null if not.  Does not block.
-   * @return server address for server hosting root region, null if none available
+   * @return server name
    * @throws InterruptedException 
    */
-  public HServerAddress getRootRegionLocation() throws InterruptedException {
-    return dataToHServerAddress(super.getData());
+  public ServerName getRootRegionLocation() throws InterruptedException {
+    byte [] data = super.getData();
+    return data == null? null: new ServerName(dataToString(data));
   }
 
   /**
    * Gets the root region location, if available, and waits for up to the
    * specified timeout if not immediately available.
    * @param timeout maximum time to wait, in millis
-   * @return server address for server hosting root region, null if timed out
+   * @return server name for server hosting root region formatted as per
+   * {@link ServerName}, or null if none available
    * @throws InterruptedException if interrupted while waiting
    */
-  public HServerAddress waitRootRegionLocation(long timeout)
+  public ServerName waitRootRegionLocation(long timeout)
   throws InterruptedException {
-    return dataToHServerAddress(super.blockUntilAvailable(timeout));
+    String str = dataToString(super.blockUntilAvailable(timeout));
+    return str == null? null: new ServerName(str);
   }
 
   /*
    * @param data
    * @return Returns null if <code>data</code> is null else converts passed data
-   * to an HServerAddress instance.
+   * to a String instance.
    */
-  private static HServerAddress dataToHServerAddress(final byte [] data) {
-    return data == null ? null: new HServerAddress(Bytes.toString(data));
+  private static String dataToString(final byte [] data) {
+    return data == null ? null: Bytes.toString(data);
   }
-}
+}
\ No newline at end of file

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKAssign.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKAssign.java?rev=1097275&r1=1097274&r2=1097275&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKAssign.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKAssign.java Wed Apr 27 23:12:42 2011
@@ -24,6 +24,7 @@ import java.util.List;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.executor.RegionTransitionData;
 import org.apache.hadoop.hbase.executor.EventHandler.EventType;
 import org.apache.zookeeper.AsyncCallback;
@@ -130,13 +131,13 @@ public class ZKAssign {
    * @throws KeeperException.NodeExistsException if node already exists
    */
   public static void createNodeOffline(ZooKeeperWatcher zkw, HRegionInfo region,
-      String serverName)
+      ServerName serverName)
   throws KeeperException, KeeperException.NodeExistsException {
     createNodeOffline(zkw, region, serverName, EventType.M_ZK_REGION_OFFLINE);
   }
 
   public static void createNodeOffline(ZooKeeperWatcher zkw, HRegionInfo region,
-      String serverName, final EventType event)
+      ServerName serverName, final EventType event)
   throws KeeperException, KeeperException.NodeExistsException {
     LOG.debug(zkw.prefix("Creating unassigned node for " +
       region.getEncodedName() + " in OFFLINE state"));
@@ -165,7 +166,7 @@ public class ZKAssign {
    * @throws KeeperException.NodeExistsException if node already exists
    */
   public static void asyncCreateNodeOffline(ZooKeeperWatcher zkw,
-      HRegionInfo region, String serverName,
+      HRegionInfo region, ServerName serverName,
       final AsyncCallback.StringCallback cb, final Object ctx)
   throws KeeperException {
     LOG.debug(zkw.prefix("Async create of unassigned node for " +
@@ -198,7 +199,7 @@ public class ZKAssign {
    * @throws KeeperException.NoNodeException if node does not exist
    */
   public static void forceNodeOffline(ZooKeeperWatcher zkw, HRegionInfo region,
-      String serverName)
+      ServerName serverName)
   throws KeeperException, KeeperException.NoNodeException {
     LOG.debug(zkw.prefix("Forcing existing unassigned node for " +
       region.getEncodedName() + " to OFFLINE state"));
@@ -231,7 +232,7 @@ public class ZKAssign {
    * @throws KeeperException.NodeExistsException if node already exists
    */
   public static boolean createOrForceNodeOffline(ZooKeeperWatcher zkw,
-      HRegionInfo region, String serverName)
+      HRegionInfo region, ServerName serverName)
   throws KeeperException {
     LOG.debug(zkw.prefix("Creating (or updating) unassigned node for " +
       region.getEncodedName() + " with OFFLINE state"));
@@ -464,7 +465,7 @@ public class ZKAssign {
    * @throws KeeperException.NodeExistsException if node already exists
    */
   public static int createNodeClosing(ZooKeeperWatcher zkw, HRegionInfo region,
-      String serverName)
+      ServerName serverName)
   throws KeeperException, KeeperException.NodeExistsException {
     LOG.debug(zkw.prefix("Creating unassigned node for " +
       region.getEncodedName() + " in a CLOSING state"));
@@ -506,7 +507,7 @@ public class ZKAssign {
    * @throws KeeperException if unexpected zookeeper exception
    */
   public static int transitionNodeClosed(ZooKeeperWatcher zkw,
-      HRegionInfo region, String serverName, int expectedVersion)
+      HRegionInfo region, ServerName serverName, int expectedVersion)
   throws KeeperException {
     return transitionNode(zkw, region, serverName,
         EventType.RS_ZK_REGION_CLOSING,
@@ -540,14 +541,14 @@ public class ZKAssign {
    * @throws KeeperException if unexpected zookeeper exception
    */
   public static int transitionNodeOpening(ZooKeeperWatcher zkw,
-      HRegionInfo region, String serverName)
+      HRegionInfo region, ServerName serverName)
   throws KeeperException {
     return transitionNodeOpening(zkw, region, serverName,
       EventType.M_ZK_REGION_OFFLINE);
   }
 
   public static int transitionNodeOpening(ZooKeeperWatcher zkw,
-      HRegionInfo region, String serverName, final EventType beginState)
+      HRegionInfo region, ServerName serverName, final EventType beginState)
   throws KeeperException {
     return transitionNode(zkw, region, serverName, beginState,
       EventType.RS_ZK_REGION_OPENING, -1);
@@ -580,7 +581,7 @@ public class ZKAssign {
    * @throws KeeperException if unexpected zookeeper exception
    */
   public static int retransitionNodeOpening(ZooKeeperWatcher zkw,
-      HRegionInfo region, String serverName, int expectedVersion)
+      HRegionInfo region, ServerName serverName, int expectedVersion)
   throws KeeperException {
     return transitionNode(zkw, region, serverName,
         EventType.RS_ZK_REGION_OPENING,
@@ -616,7 +617,7 @@ public class ZKAssign {
    * @throws KeeperException if unexpected zookeeper exception
    */
   public static int transitionNodeOpened(ZooKeeperWatcher zkw,
-      HRegionInfo region, String serverName, int expectedVersion)
+      HRegionInfo region, ServerName serverName, int expectedVersion)
   throws KeeperException {
     return transitionNode(zkw, region, serverName,
         EventType.RS_ZK_REGION_OPENING,
@@ -652,7 +653,7 @@ public class ZKAssign {
    * @throws KeeperException if unexpected zookeeper exception
    */
   public static int transitionNode(ZooKeeperWatcher zkw, HRegionInfo region,
-      String serverName, EventType beginState, EventType endState,
+      ServerName serverName, EventType beginState, EventType endState,
       int expectedVersion)
   throws KeeperException {
     return transitionNode(zkw, region, serverName, beginState, endState,
@@ -660,7 +661,7 @@ public class ZKAssign {
   }
 
   public static int transitionNode(ZooKeeperWatcher zkw, HRegionInfo region,
-      String serverName, EventType beginState, EventType endState,
+      ServerName serverName, EventType beginState, EventType endState,
       int expectedVersion, final byte [] payload)
   throws KeeperException {
     String encoded = region.getEncodedName();
@@ -699,7 +700,7 @@ public class ZKAssign {
         "unassigned node for " + encoded +
         " from " + beginState + " to " + endState + " failed, " +
         "the node existed but was in the state " + existingData.getEventType() +
-        " set by the server " + existingData.getServerName()));
+        " set by the server " + serverName));
       return -1;
     }
 

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java?rev=1097275&r1=1097274&r2=1097275&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java Wed Apr 27 23:12:42 2011
@@ -33,8 +33,6 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HServerAddress;
-import org.apache.hadoop.hbase.HServerInfo;
 import org.apache.hadoop.hbase.executor.RegionTransitionData;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.zookeeper.AsyncCallback;
@@ -126,19 +124,6 @@ public class ZKUtil {
   }
 
   /**
-   * Get the unique node-name for the specified regionserver.
-   *
-   * Used when a server puts up an ephemeral node for itself and needs to use
-   * a unique name.
-   *
-   * @param serverInfo server information
-   * @return unique, zookeeper-safe znode path for the server instance
-   */
-  public static String getNodeName(HServerInfo serverInfo) {
-    return serverInfo.getServerName();
-  }
-
-  /**
    * Get the name of the current node from the specified fully-qualified path.
    * @param path fully-qualified path
    * @return name of the current node
@@ -333,38 +318,6 @@ public class ZKUtil {
   }
 
   /**
-   * Lists the children of the specified znode, retrieving the data of each
-   * child as a server address.
-   *
-   * Used to list the currently online regionservers and their addresses.
-   *
-   * Sets no watches at all, this method is best effort.
-   *
-   * Returns an empty list if the node has no children.  Returns null if the
-   * parent node itself does not exist.
-   *
-   * @param zkw zookeeper reference
-   * @param znode node to get children of as addresses
-   * @return list of data of children of specified znode, empty if no children,
-   *         null if parent does not exist
-   * @throws KeeperException if unexpected zookeeper exception
-   */
-  public static List<HServerAddress> listChildrenAndGetAsAddresses(
-      ZooKeeperWatcher zkw, String znode)
-  throws KeeperException {
-    List<String> children = listChildrenNoWatch(zkw, znode);
-    if(children == null) {
-      return null;
-    }
-    List<HServerAddress> addresses =
-      new ArrayList<HServerAddress>(children.size());
-    for(String child : children) {
-      addresses.add(getDataAsAddress(zkw, joinZNode(znode, child)));
-    }
-    return addresses;
-  }
-
-  /**
    * Lists the children of the specified znode without setting any watches.
    *
    * Used to list the currently online regionservers and their addresses.
@@ -602,32 +555,6 @@ public class ZKUtil {
   }
 
   /**
-   * Get the data at the specified znode, deserialize it as an HServerAddress,
-   * and set a watch.
-   *
-   * Returns the data as a server address and sets a watch if the node exists.
-   * Returns null and no watch is set if the node does not exist or there is an
-   * exception.
-   *
-   * @param zkw zk reference
-   * @param znode path of node
-   * @return data of the specified node as a server address, or null
-   * @throws KeeperException if unexpected zookeeper exception
-   */
-  public static HServerAddress getDataAsAddress(ZooKeeperWatcher zkw,
-      String znode)
-  throws KeeperException {
-    byte [] data = getDataAndWatch(zkw, znode);
-    if(data == null) {
-      return null;
-    }
-    String addrString = Bytes.toString(data);
-    LOG.debug(zkw.prefix("Read server address from znode " + znode + ": " +
-      addrString));
-    return new HServerAddress(addrString);
-  }
-
-  /**
    * Update the data of an existing node with the expected version to have the
    * specified data.
    *
@@ -657,31 +584,6 @@ public class ZKUtil {
   //
 
   /**
-   * Set the specified znode to be an ephemeral node carrying the specified
-   * server address.  Used by masters for their ephemeral node and regionservers
-   * for their ephemeral node.
-   *
-   * If the node is created successfully, a watcher is also set on the node.
-   *
-   * If the node is not created successfully because it already exists, this
-   * method will also set a watcher on the node.
-   *
-   * If there is another problem, a KeeperException will be thrown.
-   *
-   * @param zkw zk reference
-   * @param znode path of node
-   * @param address server address
-   * @return true if address set, false if not, watch set in both cases
-   * @throws KeeperException if unexpected zookeeper exception
-   */
-  public static boolean setAddressAndWatch(ZooKeeperWatcher zkw,
-      String znode, HServerAddress address)
-  throws KeeperException {
-    return createEphemeralNodeAndWatch(zkw, znode,
-        Bytes.toBytes(address.toString()));
-  }
-
-  /**
    * Sets the data of the existing znode to be the specified data.  Ensures that
    * the current data has the specified expected version.
    *
@@ -745,8 +647,7 @@ public class ZKUtil {
    * @param data data to set for node
    * @throws KeeperException if unexpected zookeeper exception
    */
-  public static void setData(ZooKeeperWatcher zkw, String znode,
-      byte [] data)
+  public static void setData(ZooKeeperWatcher zkw, String znode, byte [] data)
   throws KeeperException, KeeperException.NoNodeException {
     setData(zkw, znode, data, -1);
   }
@@ -1024,10 +925,9 @@ public class ZKUtil {
   public static void deleteChildrenRecursively(ZooKeeperWatcher zkw, String node)
   throws KeeperException {
     List<String> children = ZKUtil.listChildrenNoWatch(zkw, node);
-    if(children != null || !children.isEmpty()) {
-      for(String child : children) {
-        deleteNodeRecursively(zkw, joinZNode(node, child));
-      }
+    if (children == null || children.isEmpty()) return;
+    for(String child : children) {
+      deleteNodeRecursively(zkw, joinZNode(node, child));
     }
   }
 
@@ -1041,13 +941,12 @@ public class ZKUtil {
     try {
       sb.append("HBase is rooted at ").append(zkw.baseZNode);
       sb.append("\nMaster address: ").append(
-          getDataAsAddress(zkw, zkw.masterAddressZNode));
+        Bytes.toStringBinary(getData(zkw, zkw.masterAddressZNode)));
       sb.append("\nRegion server holding ROOT: ").append(
-          getDataAsAddress(zkw, zkw.rootServerZNode));
+        Bytes.toStringBinary(getData(zkw, zkw.rootServerZNode)));
       sb.append("\nRegion servers:");
-      for (HServerAddress address : listChildrenAndGetAsAddresses(zkw,
-          zkw.rsZNode)) {
-        sb.append("\n ").append(address);
+      for (String child: listChildrenNoWatch(zkw, zkw.rsZNode)) {
+        sb.append("\n ").append(child);
       }
       sb.append("\nQuorum Server Statistics:");
       String[] servers = zkw.getQuorum().split(",");

Modified: hbase/trunk/src/main/resources/hbase-webapps/master/master.jsp
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/resources/hbase-webapps/master/master.jsp?rev=1097275&r1=1097274&r2=1097275&view=diff
==============================================================================
--- hbase/trunk/src/main/resources/hbase-webapps/master/master.jsp (original)
+++ hbase/trunk/src/main/resources/hbase-webapps/master/master.jsp Wed Apr 27 23:12:42 2011
@@ -7,17 +7,15 @@
   import="org.apache.hadoop.hbase.util.FSUtils"
   import="org.apache.hadoop.hbase.master.HMaster"
   import="org.apache.hadoop.hbase.HConstants"
+  import="org.apache.hadoop.hbase.ServerName"
   import="org.apache.hadoop.hbase.client.HBaseAdmin"
   import="org.apache.hadoop.hbase.client.HConnectionManager"
-  import="org.apache.hadoop.hbase.HServerInfo"
-  import="org.apache.hadoop.hbase.HServerAddress"
   import="org.apache.hadoop.hbase.HTableDescriptor" %><%
   HMaster master = (HMaster)getServletContext().getAttribute(HMaster.MASTER);
   Configuration conf = master.getConfiguration();
-  HServerAddress rootLocation = master.getCatalogTracker().getRootLocation();
+  ServerName rootLocation = master.getCatalogTracker().getRootLocation();
   boolean metaOnline = master.getCatalogTracker().getMetaLocation() != null;
-  Map<String, HServerInfo> serverToServerInfos =
-    master.getServerManager().getOnlineServers();
+  List<ServerName> servers = master.getServerManager().getOnlineServersList();
   int interval = conf.getInt("hbase.regionserver.msginterval", 1000)/1000;
   if (interval == 0) {
       interval = 1;
@@ -32,12 +30,12 @@
   "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> 
 <html xmlns="http://www.w3.org/1999/xhtml">
 <head><meta http-equiv="Content-Type" content="text/html;charset=UTF-8"/>
-<title>HBase Master: <%= master.getMasterAddress().getHostname()%>:<%= master.getMasterAddress().getPort() %></title>
+<title>HBase Master: <%= master.getServerName().getHostAndPort() %></title>
 <link rel="stylesheet" type="text/css" href="/static/hbase.css" />
 </head>
 <body>
 <a id="logo" href="http://wiki.apache.org/lucene-hadoop/Hbase"><img src="/static/hbase_logo_med.gif" alt="HBase Logo" title="HBase Logo" /></a>
-<h1 id="page_title">Master: <%=master.getMasterAddress().getHostname()%>:<%=master.getMasterAddress().getPort()%></h1>
+<h1 id="page_title">Master: <%=master.getServerName().getHostname()%>:<%=master.getServerName().getPort()%></h1>
 <p id="links_menu"><a href="/logs/">Local logs</a>, <a href="/stacks">Thread Dump</a>, <a href="/logLevel">Log Level</a></p>
 
 <!-- Various warnings that cluster admins should be aware of -->
@@ -137,26 +135,27 @@
 <% } %>
 
 <h2>Region Servers</h2>
-<% if (serverToServerInfos != null && serverToServerInfos.size() > 0) { %>
+<% if (servers != null && servers.size() > 0) { %>
 <%   int totalRegions = 0;
      int totalRequests = 0; 
 %>
 
 <table>
-<tr><th rowspan="<%= serverToServerInfos.size() + 1%>"></th><th>Address</th><th>Start Code</th><th>Load</th></tr>
-<%   String[] serverNames = serverToServerInfos.keySet().toArray(new String[serverToServerInfos.size()]);
+<tr><th rowspan="<%= servers.size() + 1%>"></th><th>Address</th><th>Start Code</th><th>Load</th></tr>
+<%   ServerName [] serverNames = servers.toArray(new ServerName[servers.size()]);
      Arrays.sort(serverNames);
-     for (String serverName: serverNames) {
-       HServerInfo hsi = serverToServerInfos.get(serverName);
-       String hostname = hsi.getServerAddress().getHostname() + ":" + hsi.getInfoPort();
+     for (ServerName serverName: serverNames) {
+       // HARDCODED FOR NOW; FIX -- READ FROM ZK
+       String hostname = serverName.getHostname() + ":60020";
        String url = "http://" + hostname + "/";
-       totalRegions += hsi.getLoad().getNumberOfRegions();
-       totalRequests += hsi.getLoad().getNumberOfRequests() / interval;
-       long startCode = hsi.getStartCode();
+       // TODO: FIX
+       totalRegions += 0;
+       totalRequests += 0;
+       long startCode = serverName.getStartcode();
 %>
-<tr><td><a href="<%= url %>"><%= hostname %></a></td><td><%= startCode %></td><td><%= hsi.getLoad().toString(interval) %></td></tr>
+<tr><td><a href="<%= url %>"><%= hostname %></a></td><td><%= startCode %></td><td><%= 0 %></td></tr>
 <%   } %>
-<tr><th>Total: </th><td>servers: <%= serverToServerInfos.size() %></td><td>&nbsp;</td><td>requests=<%= totalRequests %>, regions=<%= totalRegions %></td></tr>
+<tr><th>Total: </th><td>servers: <%= servers.size() %></td><td>&nbsp;</td><td>requests=<%= totalRequests %>, regions=<%= totalRegions %></td></tr>
 </table>
 
 <p>Load is requests per second and count of regions loaded</p>

Modified: hbase/trunk/src/main/resources/hbase-webapps/master/table.jsp
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/resources/hbase-webapps/master/table.jsp?rev=1097275&r1=1097274&r2=1097275&view=diff
==============================================================================
--- hbase/trunk/src/main/resources/hbase-webapps/master/table.jsp (original)
+++ hbase/trunk/src/main/resources/hbase-webapps/master/table.jsp Wed Apr 27 23:12:42 2011
@@ -6,6 +6,7 @@
   import="org.apache.hadoop.hbase.client.HBaseAdmin"
   import="org.apache.hadoop.hbase.client.HConnectionManager"
   import="org.apache.hadoop.hbase.HRegionInfo"
+  import="org.apache.hadoop.hbase.ServerName"
   import="org.apache.hadoop.hbase.HServerAddress"
   import="org.apache.hadoop.hbase.HServerInfo"
   import="org.apache.hadoop.hbase.HServerLoad"
@@ -22,7 +23,7 @@
   String tableName = request.getParameter("name");
   HTable table = new HTable(conf, tableName);
   String tableHeader = "<h2>Table Regions</h2><table><tr><th>Name</th><th>Region Server</th><th>Start Key</th><th>End Key</th><th>Requests</th></tr>";
-  HServerAddress rl = master.getCatalogTracker().getRootLocation();
+  ServerName rl = master.getCatalogTracker().getRootLocation();
   boolean showFragmentation = conf.getBoolean("hbase.master.ui.fragmentation.enabled", false);
   Map<String, Integer> frags = null;
   if (showFragmentation) {
@@ -83,8 +84,9 @@
   if(tableName.equals(Bytes.toString(HConstants.ROOT_TABLE_NAME))) {
 %>
 <%= tableHeader %>
+// HARDCODED FOR NOW TODO: FIX GET FROM ZK
 <%
-  int infoPort = master.getServerManager().getHServerInfo(rl).getInfoPort();
+  int infoPort = 60020; // HARDCODED FOR NOW -- TODO FIX
   String url = "http://" + rl.getHostname() + ":" + infoPort + "/";
 %>
 <tr>
@@ -102,9 +104,9 @@
 <%
   // NOTE: Presumes one meta region only.
   HRegionInfo meta = HRegionInfo.FIRST_META_REGIONINFO;
-  HServerAddress metaLocation = master.getCatalogTracker().getMetaLocation();
+  ServerName metaLocation = master.getCatalogTracker().getMetaLocation();
   for (int i = 0; i < 1; i++) {
-    int infoPort = master.getServerManager().getHServerInfo(metaLocation).getInfoPort();
+    int infoPort = 60020; // HARDCODED FOR NOW -- TODO FIX
     String url = "http://" + metaLocation.getHostname() + ":" + infoPort + "/";
 %>
 <tr>
@@ -141,7 +143,7 @@
   if(regions != null && regions.size() > 0) { %>
 <%=     tableHeader %>
 <%
-  for(Map.Entry<HRegionInfo, HServerAddress> hriEntry : regions.entrySet()) {
+  for (Map.Entry<HRegionInfo, HServerAddress> hriEntry : regions.entrySet()) {
     HRegionInfo regionInfo = hriEntry.getKey();
     HServerAddress addr = hriEntry.getValue();
     long req = 0;
@@ -150,16 +152,16 @@
     String urlRegionServer = null;
 
     if (addr != null) {
-      HServerInfo info = master.getServerManager().getHServerInfo(addr);
-      if (info != null) {
-        HServerLoad sl = info.getLoad();
+      HServerLoad sl = master.getServerManager().getLoad(addr);
+      if (sl != null) {
         Map<byte[], RegionLoad> map = sl.getRegionsLoad();
         if (map.containsKey(regionInfo.getRegionName())) {
           req = map.get(regionInfo.getRegionName()).getRequestsCount();
         }
-        infoPort = info.getInfoPort();
+        // This port might be wrong if RS actually ended up using something else.
+        int port = conf.getInt("hbase.regionserver.info.port", 60030);
         urlRegionServer =
-            "http://" + addr.getHostname().toString() + ":" + infoPort + "/";
+            "http://" + addr.getHostname().toString() + ":" + port + "/";
         Integer i = regDistribution.get(urlRegionServer);
         if (null == i) i = new Integer(0);
         regDistribution.put(urlRegionServer, i+1);

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java?rev=1097275&r1=1097274&r2=1097275&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java Wed Apr 27 23:12:42 2011
@@ -23,8 +23,6 @@ import java.io.IOException;
 import java.security.PrivilegedAction;
 import java.util.ArrayList;
 import java.util.List;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -39,7 +37,6 @@ import org.apache.hadoop.hbase.util.Byte
 import org.apache.hadoop.hbase.util.JVMClusterUtil;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.io.MapWritable;
-import org.apache.zookeeper.KeeperException;
 
 /**
  * This class creates a single process HBase cluster.
@@ -84,75 +81,6 @@ public class MiniHBaseCluster {
   }
 
   /**
-   * Override Master so can add inject behaviors testing.
-   */
-  public static class MiniHBaseClusterMaster extends HMaster {
-    private final Map<HServerInfo, List<HMsg>> messages =
-      new ConcurrentHashMap<HServerInfo, List<HMsg>>();
-
-    private final Map<HServerInfo, IOException> exceptions =
-      new ConcurrentHashMap<HServerInfo, IOException>();
-
-    public MiniHBaseClusterMaster(final Configuration conf)
-    throws IOException, KeeperException, InterruptedException {
-      super(conf);
-    }
-
-    /**
-     * Add a message to send to a regionserver next time it checks in.
-     * @param hsi RegionServer's HServerInfo.
-     * @param msg Message to add.
-     */
-    void addMessage(final HServerInfo hsi, HMsg msg) {
-      synchronized(this.messages) {
-        List<HMsg> hmsgs = this.messages.get(hsi);
-        if (hmsgs == null) {
-          hmsgs = new ArrayList<HMsg>();
-          this.messages.put(hsi, hmsgs);
-        }
-        hmsgs.add(msg);
-      }
-    }
-
-    void addException(final HServerInfo hsi, final IOException ex) {
-      this.exceptions.put(hsi, ex);
-    }
-
-    /**
-     * This implementation is special, exceptions will be treated first and
-     * message won't be sent back to the region servers even if some are
-     * specified.
-     * @param hsi the rs
-     * @param msgs Messages to add to
-     * @return
-     * @throws IOException will be throw if any added for this region server
-     */
-    @Override
-    protected HMsg[] adornRegionServerAnswer(final HServerInfo hsi,
-        final HMsg[] msgs) throws IOException {
-      IOException ex = this.exceptions.remove(hsi);
-      if (ex != null) {
-        throw ex;
-      }
-      HMsg [] answerMsgs = msgs;
-      synchronized (this.messages) {
-        List<HMsg> hmsgs = this.messages.get(hsi);
-        if (hmsgs != null && !hmsgs.isEmpty()) {
-          int size = answerMsgs.length;
-          HMsg [] newAnswerMsgs = new HMsg[size + hmsgs.size()];
-          System.arraycopy(answerMsgs, 0, newAnswerMsgs, 0, answerMsgs.length);
-          for (int i = 0; i < hmsgs.size(); i++) {
-            newAnswerMsgs[answerMsgs.length + i] = hmsgs.get(i);
-          }
-          answerMsgs = newAnswerMsgs;
-          hmsgs.clear();
-        }
-      }
-      return super.adornRegionServerAnswer(hsi, answerMsgs);
-    }
-  }
-
-  /**
    * Subclass so can get at protected methods (none at moment).  Also, creates
    * a FileSystem instance per instantiation.  Adds a shutdown own FileSystem
    * on the way out. Shuts down own Filesystem only, not All filesystems as
@@ -176,10 +104,6 @@ public class MiniHBaseCluster {
       return super.closeRegion(region);
     }
 
-    public void setHServerInfo(final HServerInfo hsi) {
-      this.serverInfo = hsi;
-    }
-
     /*
      * @param c
      * @param currentfs We return this if we did not make a new one.
@@ -266,8 +190,7 @@ public class MiniHBaseCluster {
     try {
       // start up a LocalHBaseCluster
       hbaseCluster = new LocalHBaseCluster(conf, nMasterNodes, 0,
-          MiniHBaseCluster.MiniHBaseClusterMaster.class,
-          MiniHBaseCluster.MiniHBaseClusterRegionServer.class);
+        HMaster.class, MiniHBaseCluster.MiniHBaseClusterRegionServer.class);
 
       // manually add the regionservers as other users
       for (int i=0; i<nRegionNodes; i++) {
@@ -385,15 +308,6 @@ public class MiniHBaseCluster {
   }
 
   /**
-   * @return Returns the rpc address actually used by the currently active
-   * master server, because the supplied port is not necessarily the actual port
-   * used.
-   */
-  public HServerAddress getHMasterAddress() {
-    return this.hbaseCluster.getActiveMaster().getMasterAddress();
-  }
-
-  /**
    * Returns the current active master, if available.
    * @return the active HMaster, null if none is active.
    */
@@ -607,59 +521,6 @@ public class MiniHBaseCluster {
   }
 
   /**
-   * Add an exception to send when a region server checks back in
-   * @param serverNumber Which server to send it to
-   * @param ex The exception that will be sent
-   * @throws IOException
-   */
-  public void addExceptionToSendRegionServer(final int serverNumber,
-      IOException ex) throws IOException {
-    MiniHBaseClusterRegionServer hrs =
-      (MiniHBaseClusterRegionServer)getRegionServer(serverNumber);
-    addExceptionToSendRegionServer(hrs, ex);
-  }
-
-  /**
-   * Add an exception to send when a region server checks back in
-   * @param hrs Which server to send it to
-   * @param ex The exception that will be sent
-   * @throws IOException
-   */
-  public void addExceptionToSendRegionServer(
-      final MiniHBaseClusterRegionServer hrs, IOException ex)
-      throws IOException {
-    ((MiniHBaseClusterMaster)getMaster()).addException(hrs.getHServerInfo(),ex);
-  }
-
-  /**
-   * Add a message to include in the responses send a regionserver when it
-   * checks back in.
-   * @param serverNumber Which server to send it to.
-   * @param msg The MESSAGE
-   * @throws IOException
-   */
-  public void addMessageToSendRegionServer(final int serverNumber,
-    final HMsg msg)
-  throws IOException {
-    MiniHBaseClusterRegionServer hrs =
-      (MiniHBaseClusterRegionServer)getRegionServer(serverNumber);
-    addMessageToSendRegionServer(hrs, msg);
-  }
-
-  /**
-   * Add a message to include in the responses send a regionserver when it
-   * checks back in.
-   * @param hrs Which region server.
-   * @param msg The MESSAGE
-   * @throws IOException
-   */
-  public void addMessageToSendRegionServer(final MiniHBaseClusterRegionServer hrs,
-    final HMsg msg)
-  throws IOException {
-    ((MiniHBaseClusterMaster)getMaster()).addMessage(hrs.getHServerInfo(), msg);
-  }
-
-  /**
    * Counts the total numbers of regions being served by the currently online
    * region servers by asking each how many regions they have.  Does not look
    * at META at all.  Count includes catalog tables.
@@ -672,4 +533,4 @@ public class MiniHBaseCluster {
     }
     return count;
   }
-}
\ No newline at end of file
+}

Added: hbase/trunk/src/test/java/org/apache/hadoop/hbase/TestHRegionLocation.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/TestHRegionLocation.java?rev=1097275&view=auto
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/TestHRegionLocation.java (added)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/TestHRegionLocation.java Wed Apr 27 23:12:42 2011
@@ -0,0 +1,67 @@
+/**
+ * Copyright 2011 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotSame;
+import static org.junit.Assert.assertTrue;
+
+import org.junit.Test;
+
+public class TestHRegionLocation {
+  @Test
+  public void testHashAndEqualsCode() {
+    ServerName hsa1 = new ServerName("localhost", 1234, -1L);
+    HRegionLocation hrl1 = new HRegionLocation(HRegionInfo.FIRST_META_REGIONINFO,
+      hsa1.getHostname(), hsa1.getPort());
+    HRegionLocation hrl2 = new HRegionLocation(HRegionInfo.FIRST_META_REGIONINFO,
+      hsa1.getHostname(), hsa1.getPort());
+    assertEquals(hrl1.hashCode(), hrl2.hashCode());
+    assertTrue(hrl1.equals(hrl2));
+    HRegionLocation hrl3 = new HRegionLocation(HRegionInfo.ROOT_REGIONINFO,
+      hsa1.getHostname(), hsa1.getPort());
+    assertNotSame(hrl1, hrl3);
+    assertFalse(hrl1.equals(hrl3));
+  }
+
+  @Test
+  public void testToString() {
+    ServerName hsa1 = new ServerName("localhost", 1234, -1L);
+    HRegionLocation hrl1 = new HRegionLocation(HRegionInfo.FIRST_META_REGIONINFO,
+      hsa1.getHostname(), hsa1.getPort());
+    System.out.println(hrl1.toString());
+  }
+
+  @Test
+  public void testCompareTo() {
+    ServerName hsa1 = new ServerName("localhost", 1234, -1L);
+    HRegionLocation hsl1 =
+      new HRegionLocation(HRegionInfo.ROOT_REGIONINFO, hsa1.getHostname(), hsa1.getPort());
+    ServerName hsa2 = new ServerName("localhost", 1235, -1L);
+    HRegionLocation hsl2 =
+      new HRegionLocation(HRegionInfo.ROOT_REGIONINFO, hsa2.getHostname(), hsa2.getPort());
+    assertTrue(hsl1.compareTo(hsl1) == 0);
+    assertTrue(hsl2.compareTo(hsl2) == 0);
+    int compare1 = hsl1.compareTo(hsl2);
+    int compare2 = hsl2.compareTo(hsl1);
+    assertTrue((compare1 > 0)? compare2 < 0: compare2 > 0);
+  }
+}
\ No newline at end of file

Added: hbase/trunk/src/test/java/org/apache/hadoop/hbase/TestHServerAddress.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/TestHServerAddress.java?rev=1097275&view=auto
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/TestHServerAddress.java (added)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/TestHServerAddress.java Wed Apr 27 23:12:42 2011
@@ -0,0 +1,83 @@
+/**
+ * Copyright 2011 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import static org.junit.Assert.*;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+
+import org.apache.hadoop.hbase.util.Writables;
+import org.junit.Test;
+
+/**
+ * Tests for {@link HServerAddress}
+ */
+public class TestHServerAddress {
+  @Test
+  public void testHashCode() {
+    HServerAddress hsa1 = new HServerAddress("localhost", 1234);
+    HServerAddress hsa2 = new HServerAddress("localhost", 1234);
+    assertEquals(hsa1.hashCode(), hsa2.hashCode());
+    HServerAddress hsa3 = new HServerAddress("localhost", 1235);
+    assertNotSame(hsa1.hashCode(), hsa3.hashCode());
+  }
+
+  @Test
+  public void testHServerAddress() {
+    new HServerAddress();
+  }
+
+  @Test
+  public void testHServerAddressInetSocketAddress() {
+    HServerAddress hsa1 =
+      new HServerAddress(new InetSocketAddress("localhost", 1234));
+    System.out.println(hsa1.toString());
+  }
+
+  @Test
+  public void testHServerAddressString() {
+    HServerAddress hsa1 = new HServerAddress("localhost", 1234);
+    HServerAddress hsa2 =
+      new HServerAddress(new InetSocketAddress("localhost", 1234));
+    assertTrue(hsa1.equals(hsa2));
+  }
+
+  @Test
+  public void testHServerAddressHServerAddress() {
+    HServerAddress hsa1 = new HServerAddress("localhost", 1234);
+    HServerAddress hsa2 = new HServerAddress(hsa1);
+    assertEquals(hsa1, hsa2);
+  }
+
+  @Test
+  public void testReadFields() throws IOException {
+    HServerAddress hsa1 = new HServerAddress("localhost", 1234);
+    HServerAddress hsa2 = new HServerAddress("localhost", 1235);
+    byte [] bytes = Writables.getBytes(hsa1);
+    HServerAddress deserialized =
+      (HServerAddress)Writables.getWritable(bytes, new HServerAddress());
+    assertEquals(hsa1, deserialized);
+    bytes = Writables.getBytes(hsa2);
+    deserialized =
+      (HServerAddress)Writables.getWritable(bytes, new HServerAddress());
+    assertNotSame(hsa1, deserialized);
+  }
+}
\ No newline at end of file

Added: hbase/trunk/src/test/java/org/apache/hadoop/hbase/TestHServerInfo.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/TestHServerInfo.java?rev=1097275&view=auto
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/TestHServerInfo.java (added)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/TestHServerInfo.java Wed Apr 27 23:12:42 2011
@@ -0,0 +1,80 @@
+package org.apache.hadoop.hbase;
+
+import static org.junit.Assert.*;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hbase.util.Writables;
+import org.junit.Test;
+
+public class TestHServerInfo {
+
+  @Test
+  public void testHashCodeAndEquals() {
+    HServerAddress hsa1 = new HServerAddress("localhost", 1234);
+    HServerInfo hsi1 = new HServerInfo(hsa1, 1L, 5678);
+    HServerInfo hsi2 = new HServerInfo(hsa1, 1L, 5678);
+    HServerInfo hsi3 = new HServerInfo(hsa1, 2L, 5678);
+    HServerInfo hsi4 = new HServerInfo(hsa1, 1L, 5677);
+    HServerAddress hsa2 = new HServerAddress("localhost", 1235);
+    HServerInfo hsi5 = new HServerInfo(hsa2, 1L, 5678);
+    assertEquals(hsi1.hashCode(), hsi2.hashCode());
+    assertTrue(hsi1.equals(hsi2));
+    assertNotSame(hsi1.hashCode(), hsi3.hashCode());
+    assertFalse(hsi1.equals(hsi3));
+    assertNotSame(hsi1.hashCode(), hsi4.hashCode());
+    assertFalse(hsi1.equals(hsi4));
+    assertNotSame(hsi1.hashCode(), hsi5.hashCode());
+    assertFalse(hsi1.equals(hsi5));
+  }
+
+  @Test
+  public void testHServerInfoHServerInfo() {
+    HServerAddress hsa1 = new HServerAddress("localhost", 1234);
+    HServerInfo hsi1 = new HServerInfo(hsa1, 1L, 5678);
+    HServerInfo hsi2 = new HServerInfo(hsi1);
+    assertEquals(hsi1, hsi2);
+  }
+
+  @Test
+  public void testGetServerAddress() {
+    HServerAddress hsa1 = new HServerAddress("localhost", 1234);
+    HServerInfo hsi1 = new HServerInfo(hsa1, 1L, 5678);
+    assertEquals(hsi1.getServerAddress(), hsa1);
+  }
+
+  @Test
+  public void testToString() {
+    HServerAddress hsa1 = new HServerAddress("localhost", 1234);
+    HServerInfo hsi1 = new HServerInfo(hsa1, 1L, 5678);
+    System.out.println(hsi1.toString());
+  }
+
+  @Test
+  public void testReadFields() throws IOException {
+    HServerAddress hsa1 = new HServerAddress("localhost", 1234);
+    HServerInfo hsi1 = new HServerInfo(hsa1, 1L, 5678);
+    HServerAddress hsa2 = new HServerAddress("localhost", 1235);
+    HServerInfo hsi2 = new HServerInfo(hsa2, 1L, 5678);
+    byte [] bytes = Writables.getBytes(hsi1);
+    HServerInfo deserialized =
+      (HServerInfo)Writables.getWritable(bytes, new HServerInfo());
+    assertEquals(hsi1, deserialized);
+    bytes = Writables.getBytes(hsi2);
+    deserialized = (HServerInfo)Writables.getWritable(bytes, new HServerInfo());
+    assertNotSame(hsa1, deserialized);
+  }
+
+  @Test
+  public void testCompareTo() {
+    HServerAddress hsa1 = new HServerAddress("localhost", 1234);
+    HServerInfo hsi1 = new HServerInfo(hsa1, 1L, 5678);
+    HServerAddress hsa2 = new HServerAddress("localhost", 1235);
+    HServerInfo hsi2 = new HServerInfo(hsa2, 1L, 5678);
+    assertTrue(hsi1.compareTo(hsi1) == 0);
+    assertTrue(hsi2.compareTo(hsi2) == 0);
+    int compare1 = hsi1.compareTo(hsi2);
+    int compare2 = hsi2.compareTo(hsi1);
+    assertTrue((compare1 > 0)? compare2 < 0: compare2 > 0);
+  }
+}
\ No newline at end of file

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java?rev=1097275&r1=1097274&r2=1097275&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java Wed Apr 27 23:12:42 2011
@@ -22,10 +22,15 @@ package org.apache.hadoop.hbase;
 import java.io.IOException;
 import java.util.List;
 import java.util.ArrayList;
+import java.util.Map;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 
+import org.apache.hadoop.hbase.catalog.CatalogTracker;
+import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.HConnectionManager;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
 
@@ -33,7 +38,6 @@ import org.apache.hadoop.hbase.regionser
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.JVMClusterUtil;
-import org.apache.hadoop.hbase.util.Threads;
 
 /**
  * Test whether region rebalancing works. (HBASE-71)
@@ -93,8 +97,16 @@ public class TestRegionRebalancing exten
    * For HBASE-71. Try a few different configurations of starting and stopping
    * region servers to see if the assignment or regions is pretty balanced.
    * @throws IOException
+   * @throws InterruptedException
    */
-  public void testRebalancing() throws IOException {
+  public void testRebalancing() throws IOException, InterruptedException {
+    HConnection connection = HConnectionManager.getConnection(conf);
+    CatalogTracker ct = new CatalogTracker(connection);
+    ct.start();
+    Map<HRegionInfo, ServerName> regions = MetaReader.fullScan(ct);
+    for (Map.Entry<HRegionInfo, ServerName> e: regions.entrySet()) {
+      LOG.info(e);
+    }
     table = new HTable(conf, "test");
     assertEquals("Test table should have 20 regions",
       20, table.getStartKeys().length);
@@ -102,39 +114,34 @@ public class TestRegionRebalancing exten
     // verify that the region assignments are balanced to start out
     assertRegionsAreBalanced();
 
-    LOG.debug("Adding 2nd region server.");
     // add a region server - total of 2
-    LOG.info("Started=" +
+    LOG.info("Started second server=" +
       cluster.startRegionServer().getRegionServer().getServerName());
     cluster.getMaster().balance();
     assertRegionsAreBalanced();
 
     // add a region server - total of 3
-    LOG.debug("Adding 3rd region server.");
-    LOG.info("Started=" +
+    LOG.info("Started third server=" +
       cluster.startRegionServer().getRegionServer().getServerName());
     cluster.getMaster().balance();
     assertRegionsAreBalanced();
 
     // kill a region server - total of 2
-    LOG.debug("Killing the 3rd region server.");
-    LOG.info("Stopped=" + cluster.stopRegionServer(2, false));
+    LOG.info("Stopped third server=" + cluster.stopRegionServer(2, false));
     cluster.waitOnRegionServer(2);
     cluster.getMaster().balance();
     assertRegionsAreBalanced();
 
     // start two more region servers - total of 4
-    LOG.debug("Adding 3rd region server");
-    LOG.info("Started=" +
+    LOG.info("Readding third server=" +
       cluster.startRegionServer().getRegionServer().getServerName());
-    LOG.debug("Adding 4th region server");
-    LOG.info("Started=" +
+    LOG.info("Added fourth server=" +
       cluster.startRegionServer().getRegionServer().getServerName());
     cluster.getMaster().balance();
     assertRegionsAreBalanced();
 
     for (int i = 0; i < 6; i++){
-      LOG.debug("Adding " + (i + 5) + "th region server");
+      LOG.info("Adding " + (i + 5) + "th region server");
       cluster.startRegionServer();
     }
     cluster.getMaster().balance();
@@ -169,7 +176,7 @@ public class TestRegionRebalancing exten
 
       int regionCount = getRegionCount();
       List<HRegionServer> servers = getOnlineRegionServers();
-      double avg = cluster.getMaster().getServerManager().getAverageLoad();
+      double avg = cluster.getMaster().getAverageLoad();
       int avgLoadPlusSlop = (int)Math.ceil(avg * (1 + slop));
       int avgLoadMinusSlop = (int)Math.floor(avg * (1 - slop)) - 1;
       LOG.debug("There are " + servers.size() + " servers and " + regionCount

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/TestSerialization.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/TestSerialization.java?rev=1097275&r1=1097274&r2=1097275&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/TestSerialization.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/TestSerialization.java Wed Apr 27 23:12:42 2011
@@ -130,19 +130,6 @@ public class TestSerialization {
       HConstants.EMPTY_END_ROW);
   }
 
-  /**
-   * Test ServerInfo serialization
-   * @throws Exception
-   */
-  @Test public void testServerInfo() throws Exception {
-    HServerInfo hsi = new HServerInfo(new HServerAddress("0.0.0.0:123"), -1,
-      1245, "default name");
-    byte [] b = Writables.getBytes(hsi);
-    HServerInfo deserializedHsi =
-      (HServerInfo)Writables.getWritable(b, new HServerInfo());
-    assertTrue(hsi.equals(deserializedHsi));
-  }
-
   @Test public void testPut() throws Exception{
     byte[] row = "row".getBytes();
     byte[] fam = "fam".getBytes();
@@ -584,4 +571,4 @@ public class TestSerialization {
         HConstants.REPLICATION_SCOPE_LOCAL));
     return htd;
   }
-}
\ No newline at end of file
+}

Added: hbase/trunk/src/test/java/org/apache/hadoop/hbase/TestServerName.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/TestServerName.java?rev=1097275&view=auto
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/TestServerName.java (added)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/TestServerName.java Wed Apr 27 23:12:42 2011
@@ -0,0 +1,56 @@
+/**
+ * Copyright 2011 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import static org.junit.Assert.*;
+
+import java.util.HashSet;
+import java.util.Set;
+
+import org.junit.Test;
+
+public class TestServerName {
+  @Test
+  public void testServerName() {
+    ServerName sn = new ServerName("www.example.org", 1234, 5678);
+    ServerName sn2 = new ServerName("www.example.org", 1234, 5678);
+    ServerName sn3 = new ServerName("www.example.org", 1234, 56789);
+    assertTrue(sn.equals(sn2));
+    assertFalse(sn.equals(sn3));
+    assertEquals(sn.hashCode(), sn2.hashCode());
+    assertNotSame(sn.hashCode(), sn3.hashCode());
+    assertEquals(sn.toString(),
+      ServerName.getServerName("www.example.org", 1234, 5678));
+    assertEquals(sn.toString(),
+      ServerName.getServerName("www.example.org:1234", 5678));
+    assertEquals(sn.toString(),
+      "www.example.org" + ServerName.SERVERNAME_SEPARATOR +
+      "1234" + ServerName.SERVERNAME_SEPARATOR + "5678");
+  }
+
+  @Test
+  public void getServerStartcodeFromServerName() {
+    ServerName sn = new ServerName("www.example.org", 1234, 5678);
+    assertEquals(5678,
+      ServerName.getServerStartcodeFromServerName(sn.toString()));
+    assertNotSame(5677,
+      ServerName.getServerStartcodeFromServerName(sn.toString()));
+  }
+}
\ No newline at end of file

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/catalog/TestCatalogTracker.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/catalog/TestCatalogTracker.java?rev=1097275&r1=1097274&r2=1097275&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/catalog/TestCatalogTracker.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/catalog/TestCatalogTracker.java Wed Apr 27 23:12:42 2011
@@ -36,9 +36,9 @@ import org.apache.hadoop.hbase.HBaseTest
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HServerAddress;
-import org.apache.hadoop.hbase.HServerInfo;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
+import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.client.Result;
@@ -63,8 +63,8 @@ import org.mockito.Mockito;
 public class TestCatalogTracker {
   private static final Log LOG = LogFactory.getLog(TestCatalogTracker.class);
   private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
-  private static final HServerAddress HSA =
-    new HServerAddress("example.org:1234");
+  private static final ServerName HSA =
+    new ServerName("example.org", 1234, System.currentTimeMillis());
   private ZooKeeperWatcher watcher;
   private Abortable abortable;
 
@@ -115,7 +115,7 @@ public class TestCatalogTracker {
     final CatalogTracker ct = constructAndStartCatalogTracker(connection);
     try {
       RootLocationEditor.setRootLocation(this.watcher,
-        new HServerAddress("example.com:1234"));
+        new ServerName("example.com", 1234, System.currentTimeMillis()));
     } finally {
       // Clean out root location or later tests will be confused... they presume
       // start fresh in zk.
@@ -131,9 +131,9 @@ public class TestCatalogTracker {
   @Test public void testInterruptWaitOnMetaAndRoot()
   throws IOException, InterruptedException {
     final CatalogTracker ct = constructAndStartCatalogTracker();
-    HServerAddress hsa = ct.getRootLocation();
+    ServerName hsa = ct.getRootLocation();
     Assert.assertNull(hsa);
-    HServerAddress meta = ct.getMetaLocation();
+    ServerName meta = ct.getMetaLocation();
     Assert.assertNull(meta);
     Thread t = new Thread() {
       @Override
@@ -169,7 +169,7 @@ public class TestCatalogTracker {
     final CatalogTracker ct = constructAndStartCatalogTracker(connection);
     try {
       RootLocationEditor.setRootLocation(this.watcher,
-        new HServerAddress("example.com:1234"));
+        new ServerName("example.com", 1234, System.currentTimeMillis()));
       Assert.assertFalse(ct.verifyMetaRegionLocation(100));
     } finally {
       // Clean out root location or later tests will be confused... they presume
@@ -200,7 +200,7 @@ public class TestCatalogTracker {
     final CatalogTracker ct = constructAndStartCatalogTracker(connection);
     try {
       RootLocationEditor.setRootLocation(this.watcher,
-        new HServerAddress("example.com:1234"));
+        new ServerName("example.com", 1234, System.currentTimeMillis()));
       Assert.assertFalse(ct.verifyRootRegionLocation(100));
     } finally {
       // Clean out root location or later tests will be confused... they presume
@@ -232,7 +232,7 @@ public class TestCatalogTracker {
   @Test public void testNoTimeoutWaitForRoot()
   throws IOException, InterruptedException, KeeperException {
     final CatalogTracker ct = constructAndStartCatalogTracker();
-    HServerAddress hsa = ct.getRootLocation();
+    ServerName hsa = ct.getRootLocation();
     Assert.assertNull(hsa);
 
     // Now test waiting on root location getting set.
@@ -246,7 +246,7 @@ public class TestCatalogTracker {
     Assert.assertTrue(ct.getRootLocation().equals(hsa));
   }
 
-  private HServerAddress setRootLocation() throws KeeperException {
+  private ServerName setRootLocation() throws KeeperException {
     RootLocationEditor.setRootLocation(this.watcher, HSA);
     return HSA;
   }
@@ -270,7 +270,7 @@ public class TestCatalogTracker {
       thenReturn(mockHRI);
 
     final CatalogTracker ct = constructAndStartCatalogTracker(connection);
-    HServerAddress hsa = ct.getMetaLocation();
+    ServerName hsa = ct.getMetaLocation();
     Assert.assertNull(hsa);
 
     // Now test waiting on meta location getting set.
@@ -300,8 +300,7 @@ public class TestCatalogTracker {
     // been assigned.
     String node = ct.getMetaNodeTracker().getNode();
     ZKUtil.createAndFailSilent(this.watcher, node);
-    MetaEditor.updateMetaLocation(ct, HRegionInfo.FIRST_META_REGIONINFO,
-      new HServerInfo(HSA, -1, "example.com"));
+    MetaEditor.updateMetaLocation(ct, HRegionInfo.FIRST_META_REGIONINFO, HSA);
     ZKUtil.deleteNode(this.watcher, node);
     // Join the thread... should exit shortly.
     t.join();

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/catalog/TestCatalogTrackerOnCluster.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/catalog/TestCatalogTrackerOnCluster.java?rev=1097275&r1=1097274&r2=1097275&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/catalog/TestCatalogTrackerOnCluster.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/catalog/TestCatalogTrackerOnCluster.java Wed Apr 27 23:12:42 2011
@@ -23,7 +23,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HServerAddress;
+import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.junit.Test;
 
@@ -53,7 +53,8 @@ public class TestCatalogTrackerOnCluster
           LOG.error("Abort was called on 'bad root location writer'", e);
         }
     });
-    HServerAddress nonsense = new HServerAddress("example.org:1234");
+    ServerName nonsense =
+      new ServerName("example.org", 1234, System.currentTimeMillis());
     RootLocationEditor.setRootLocation(zookeeper, nonsense);
     // Bring back up the hbase cluster.  See if it can deal with nonsense root
     // location.

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditor.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditor.java?rev=1097275&r1=1097274&r2=1097275&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditor.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditor.java Wed Apr 27 23:12:42 2011
@@ -32,7 +32,7 @@ import org.apache.hadoop.hbase.Abortable
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HServerAddress;
+import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.client.HConnectionManager;
@@ -116,7 +116,7 @@ public class TestMetaReaderEditor {
     // Test it works getting a region from user table.
     List<HRegionInfo> regions = MetaReader.getTableRegions(ct, nameBytes);
     assertEquals(regionCount, regions.size());
-    Pair<HRegionInfo, HServerAddress> pair =
+    Pair<HRegionInfo, ServerName> pair =
       MetaReader.getRegion(ct, regions.get(0).getRegionName());
     assertEquals(regions.get(0).getEncodedName(),
       pair.getFirst().getEncodedName());

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java?rev=1097275&r1=1097274&r2=1097275&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java Wed Apr 27 23:12:42 2011
@@ -121,6 +121,154 @@ public class TestFromClientSide {
   }
 
   /**
+   * HBASE-2468 use case 1 and 2: region info de/serialization
+   */
+   @Test
+   public void testRegionCacheDeSerialization() throws Exception {
+     // 1. test serialization.
+     LOG.info("Starting testRegionCacheDeSerialization");
+     final byte[] TABLENAME = Bytes.toBytes("testCachePrewarm2");
+     final byte[] FAMILY = Bytes.toBytes("family");
+     Configuration conf = TEST_UTIL.getConfiguration();
+     TEST_UTIL.createTable(TABLENAME, FAMILY);
+
+     // Set up test table:
+     // Create table:
+     HTable table = new HTable(conf, TABLENAME);
+
+     // Create multiple regions for this table
+     TEST_UTIL.createMultiRegions(table, FAMILY);
+     Scan s = new Scan();
+     ResultScanner scanner = table.getScanner(s);
+     while (scanner.next() != null) continue;
+
+     Path tempPath = new Path(HBaseTestingUtility.getTestDir(), "regions.dat");
+
+     final String tempFileName = tempPath.toString();
+
+     FileOutputStream fos = new FileOutputStream(tempFileName);
+     DataOutputStream dos = new DataOutputStream(fos);
+
+     // serialize the region info and output to a local file.
+     table.serializeRegionInfo(dos);
+     dos.flush();
+     dos.close();
+
+     // read a local file and deserialize the region info from it.
+     FileInputStream fis = new FileInputStream(tempFileName);
+     DataInputStream dis = new DataInputStream(fis);
+
+     Map<HRegionInfo, HServerAddress> deserRegions =
+       table.deserializeRegionInfo(dis);
+     dis.close();
+
+     // regions obtained from meta scanner.
+     Map<HRegionInfo, HServerAddress> loadedRegions =
+       table.getRegionsInfo();
+
+     // set the deserialized regions to the global cache.
+     table.getConnection().clearRegionCache();
+
+     table.getConnection().prewarmRegionCache(table.getTableName(),
+         deserRegions);
+
+     // verify whether the 2 maps are identical or not.
+     assertEquals("Number of cached region is incorrect",
+         HConnectionManager.getCachedRegionCount(conf, TABLENAME),
+         loadedRegions.size());
+
+     // verify each region is prefetched or not.
+     for (Map.Entry<HRegionInfo, HServerAddress> e: loadedRegions.entrySet()) {
+       HRegionInfo hri = e.getKey();
+       assertTrue(HConnectionManager.isRegionCached(conf,
+           hri.getTableDesc().getName(), hri.getStartKey()));
+     }
+
+     // delete the temp file
+     File f = new java.io.File(tempFileName);
+     f.delete();
+     LOG.info("Finishing testRegionCacheDeSerialization");
+   }
+
+  /**
+   * HBASE-2468 use case 3:
+   */
+  @Test
+  public void testRegionCachePreWarm() throws Exception {
+    LOG.info("Starting testRegionCachePreWarm");
+    final byte [] TABLENAME = Bytes.toBytes("testCachePrewarm");
+    Configuration conf = TEST_UTIL.getConfiguration();
+
+    // Set up test table:
+    // Create table:
+    TEST_UTIL.createTable(TABLENAME, FAMILY);
+
+    // disable region cache for the table.
+    HTable.setRegionCachePrefetch(conf, TABLENAME, false);
+    assertFalse("The table is disabled for region cache prefetch",
+        HTable.getRegionCachePrefetch(conf, TABLENAME));
+
+    HTable table = new HTable(conf, TABLENAME);
+
+    // create many regions for the table.
+    TEST_UTIL.createMultiRegions(table, FAMILY);
+    // This count effectively waits until the regions have been
+    // fully assigned
+    TEST_UTIL.countRows(table);
+    table.getConnection().clearRegionCache();
+    assertEquals("Clearing cache should have 0 cached ", 0,
+        HConnectionManager.getCachedRegionCount(conf, TABLENAME));
+
+    // A Get is supposed to do a region lookup request
+    Get g = new Get(Bytes.toBytes("aaa"));
+    table.get(g);
+
+    // only one region should be cached if the cache prefetch is disabled.
+    assertEquals("Number of cached region is incorrect ", 1,
+        HConnectionManager.getCachedRegionCount(conf, TABLENAME));
+
+    // now we enable cached prefetch.
+    HTable.setRegionCachePrefetch(conf, TABLENAME, true);
+    assertTrue("The table is enabled for region cache prefetch",
+        HTable.getRegionCachePrefetch(conf, TABLENAME));
+
+    HTable.setRegionCachePrefetch(conf, TABLENAME, false);
+    assertFalse("The table is disabled for region cache prefetch",
+        HTable.getRegionCachePrefetch(conf, TABLENAME));
+
+    HTable.setRegionCachePrefetch(conf, TABLENAME, true);
+    assertTrue("The table is enabled for region cache prefetch",
+        HTable.getRegionCachePrefetch(conf, TABLENAME));
+
+    table.getConnection().clearRegionCache();
+
+    assertEquals("Number of cached region is incorrect ", 0,
+        HConnectionManager.getCachedRegionCount(conf, TABLENAME));
+
+    // if there is a cache miss, some additional regions should be prefetched.
+    Get g2 = new Get(Bytes.toBytes("bbb"));
+    table.get(g2);
+
+    // Get the configured number of cache read-ahead regions.
+    int prefetchRegionNumber = conf.getInt("hbase.client.prefetch.limit", 10);
+
+    // the total number of cached regions == region("aaa") + prefetched regions.
+    LOG.info("Testing how many regions cached");
+    assertEquals("Number of cached region is incorrect ", prefetchRegionNumber,
+        HConnectionManager.getCachedRegionCount(conf, TABLENAME));
+
+    table.getConnection().clearRegionCache();
+
+    Get g3 = new Get(Bytes.toBytes("abc"));
+    table.get(g3);
+    assertEquals("Number of cached region is incorrect ", prefetchRegionNumber,
+        HConnectionManager.getCachedRegionCount(conf, TABLENAME));
+
+    LOG.info("Finishing testRegionCachePreWarm");
+  }
+
+
+  /**
    * Verifies that getConfiguration returns the same Configuration object used
    * to create the HTable instance.
    */
@@ -3762,150 +3910,7 @@ public class TestFromClientSide {
     assertTrue(scan.getFamilyMap().containsKey(FAMILY));
   }
 
-  /**
-   * HBASE-2468 use case 1 and 2: region info de/serialization
-   */
-   @Test
-   public void testRegionCacheDeSerialization() throws Exception {
-     // 1. test serialization.
-     LOG.info("Starting testRegionCacheDeSerialization");
-     final byte[] TABLENAME = Bytes.toBytes("testCachePrewarm2");
-     final byte[] FAMILY = Bytes.toBytes("family");
-     Configuration conf = TEST_UTIL.getConfiguration();
-     TEST_UTIL.createTable(TABLENAME, FAMILY);
-
-     // Set up test table:
-     // Create table:
-     HTable table = new HTable(conf, TABLENAME);
-
-     // Create multiple regions for this table
-     TEST_UTIL.createMultiRegions(table, FAMILY);
-
-     Path tempPath = new Path(HBaseTestingUtility.getTestDir(), "regions.dat");
-
-     final String tempFileName = tempPath.toString();
-
-     FileOutputStream fos = new FileOutputStream(tempFileName);
-     DataOutputStream dos = new DataOutputStream(fos);
-
-     // serialize the region info and output to a local file.
-     table.serializeRegionInfo(dos);
-     dos.flush();
-     dos.close();
-
-     // read a local file and deserialize the region info from it.
-     FileInputStream fis = new FileInputStream(tempFileName);
-     DataInputStream dis = new DataInputStream(fis);
-
-     Map<HRegionInfo, HServerAddress> deserRegions =
-       table.deserializeRegionInfo(dis);
-     dis.close();
-
-     // regions obtained from meta scanner.
-     Map<HRegionInfo, HServerAddress> loadedRegions =
-       table.getRegionsInfo();
-
-     // set the deserialized regions to the global cache.
-     table.getConnection().clearRegionCache();
-
-     table.getConnection().prewarmRegionCache(table.getTableName(),
-         deserRegions);
-
-     // verify whether the 2 maps are identical or not.
-     assertEquals("Number of cached region is incorrect",
-         HConnectionManager.getCachedRegionCount(conf, TABLENAME),
-         loadedRegions.size());
-
-     // verify each region is prefetched or not.
-     for (Map.Entry<HRegionInfo, HServerAddress> e: loadedRegions.entrySet()) {
-       HRegionInfo hri = e.getKey();
-       assertTrue(HConnectionManager.isRegionCached(conf,
-           hri.getTableDesc().getName(), hri.getStartKey()));
-     }
-
-     // delete the temp file
-     File f = new java.io.File(tempFileName);
-     f.delete();
-     LOG.info("Finishing testRegionCacheDeSerialization");
-   }
-
-  /**
-   * HBASE-2468 use case 3:
-   */
-  @Test
-  public void testRegionCachePreWarm() throws Exception {
-    LOG.info("Starting testRegionCachePreWarm");
-    final byte [] TABLENAME = Bytes.toBytes("testCachePrewarm");
-    Configuration conf = TEST_UTIL.getConfiguration();
-
-    // Set up test table:
-    // Create table:
-    TEST_UTIL.createTable(TABLENAME, FAMILY);
-
-    // disable region cache for the table.
-    HTable.setRegionCachePrefetch(conf, TABLENAME, false);
-    assertFalse("The table is disabled for region cache prefetch",
-        HTable.getRegionCachePrefetch(conf, TABLENAME));
-
-    HTable table = new HTable(conf, TABLENAME);
-
-    // create many regions for the table.
-    TEST_UTIL.createMultiRegions(table, FAMILY);
-    // This count effectively waits until the regions have been
-    // fully assigned
-    TEST_UTIL.countRows(table);
-    table.getConnection().clearRegionCache();
-    assertEquals("Clearing cache should have 0 cached ", 0,
-        HConnectionManager.getCachedRegionCount(conf, TABLENAME));
-
-    // A Get is suppose to do a region lookup request
-    Get g = new Get(Bytes.toBytes("aaa"));
-    table.get(g);
-
-    // only one region should be cached if the cache prefetch is disabled.
-    assertEquals("Number of cached region is incorrect ", 1,
-        HConnectionManager.getCachedRegionCount(conf, TABLENAME));
-
-    // now we enable cached prefetch.
-    HTable.setRegionCachePrefetch(conf, TABLENAME, true);
-    assertTrue("The table is enabled for region cache prefetch",
-        HTable.getRegionCachePrefetch(conf, TABLENAME));
-
-    HTable.setRegionCachePrefetch(conf, TABLENAME, false);
-    assertFalse("The table is disabled for region cache prefetch",
-        HTable.getRegionCachePrefetch(conf, TABLENAME));
-
-    HTable.setRegionCachePrefetch(conf, TABLENAME, true);
-    assertTrue("The table is enabled for region cache prefetch",
-        HTable.getRegionCachePrefetch(conf, TABLENAME));
-
-    table.getConnection().clearRegionCache();
-
-    assertEquals("Number of cached region is incorrect ", 0,
-        HConnectionManager.getCachedRegionCount(conf, TABLENAME));
-
-    // if there is a cache miss, some additional regions should be prefetched.
-    Get g2 = new Get(Bytes.toBytes("bbb"));
-    table.get(g2);
-
-    // Get the configured number of cache read-ahead regions.
-    int prefetchRegionNumber = conf.getInt("hbase.client.prefetch.limit", 10);
-
-    // the total number of cached regions == region('aaa") + prefeched regions.
-    LOG.info("Testing how many regions cached");
-    assertEquals("Number of cached region is incorrect ", prefetchRegionNumber,
-        HConnectionManager.getCachedRegionCount(conf, TABLENAME));
-
-    table.getConnection().clearRegionCache();
-
-    Get g3 = new Get(Bytes.toBytes("abc"));
-    table.get(g3);
-    assertEquals("Number of cached region is incorrect ", prefetchRegionNumber,
-        HConnectionManager.getCachedRegionCount(conf, TABLENAME));
-
-    LOG.info("Finishing testRegionCachePreWarm");
-  }
-
+ 
   @Test
   public void testIncrement() throws Exception {
     LOG.info("Starting testIncrement");

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java?rev=1097275&r1=1097274&r2=1097275&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java Wed Apr 27 23:12:42 2011
@@ -20,15 +20,24 @@
 
 package org.apache.hadoop.hbase.coprocessor;
 
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HServerAddress;
-import org.apache.hadoop.hbase.HServerInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.UnknownRegionException;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
@@ -41,13 +50,6 @@ import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
-import java.io.IOException;
-import java.util.Collection;
-import java.util.List;
-import java.util.Map;
-
-import static org.junit.Assert.*;
-
 /**
  * Tests invocation of the {@link org.apache.hadoop.hbase.coprocessor.MasterObserver}
  * interface hooks at all appropriate times during normal HMaster operations.
@@ -217,14 +219,14 @@ public class TestMasterObserver {
 
     @Override
     public void preMove(ObserverContext<MasterCoprocessorEnvironment> env,
-        HRegionInfo region, HServerInfo srcServer, HServerInfo destServer)
+        HRegionInfo region, ServerName srcServer, ServerName destServer)
     throws UnknownRegionException {
       preMoveCalled = true;
     }
 
     @Override
     public void postMove(ObserverContext<MasterCoprocessorEnvironment> env, HRegionInfo region,
-        HServerInfo srcServer, HServerInfo destServer)
+        ServerName srcServer, ServerName destServer)
     throws UnknownRegionException {
       postMoveCalled = true;
     }
@@ -445,15 +447,17 @@ public class TestMasterObserver {
 
     Map<HRegionInfo,HServerAddress> regions = table.getRegionsInfo();
     assertFalse(regions.isEmpty());
-    Map.Entry<HRegionInfo,HServerAddress> firstRegion =
+    Map.Entry<HRegionInfo, HServerAddress> firstRegion =
         regions.entrySet().iterator().next();
 
     // try to force a move
-    Collection<HServerInfo> servers = master.getClusterStatus().getServerInfo();
+    Collection<ServerName> servers = master.getClusterStatus().getServers();
     String destName = null;
-    for (HServerInfo info : servers) {
-      if (!info.getServerAddress().equals(firstRegion.getValue())) {
-        destName = info.getServerName();
+    for (ServerName info : servers) {
+      HServerAddress hsa =
+        new HServerAddress(info.getHostname(), info.getPort());
+      if (!hsa.equals(firstRegion.getValue())) {
+        destName = info.toString();
         break;
       }
     }
@@ -471,7 +475,7 @@ public class TestMasterObserver {
     master.balanceSwitch(false);
     // move half the open regions from RS 0 to RS 1
     HRegionServer rs = cluster.getRegionServer(0);
-    byte[] destRS = Bytes.toBytes(cluster.getRegionServer(1).getServerName());
+    byte[] destRS = Bytes.toBytes(cluster.getRegionServer(1).getServerName().toString());
     List<HRegionInfo> openRegions = rs.getOnlineRegions();
     int moveCnt = openRegions.size()/2;
     for (int i=0; i<moveCnt; i++) {

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java?rev=1097275&r1=1097274&r2=1097275&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java Wed Apr 27 23:12:42 2011
@@ -30,9 +30,10 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HServerAddress;
 import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.catalog.CatalogTracker;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
@@ -67,7 +68,7 @@ public class TestActiveMasterManager {
     } catch(KeeperException.NoNodeException nne) {}
 
     // Create the master node with a dummy address
-    HServerAddress master = new HServerAddress("localhost", 1);
+    ServerName master = new ServerName("localhost", 1, System.currentTimeMillis());
     // Should not have a master yet
     DummyMaster dummyMaster = new DummyMaster();
     ActiveMasterManager activeMasterManager = new ActiveMasterManager(zk,
@@ -106,8 +107,10 @@ public class TestActiveMasterManager {
     } catch(KeeperException.NoNodeException nne) {}
 
     // Create the master node with a dummy address
-    HServerAddress firstMasterAddress = new HServerAddress("localhost", 1);
-    HServerAddress secondMasterAddress = new HServerAddress("localhost", 2);
+    ServerName firstMasterAddress =
+      new ServerName("localhost", 1, System.currentTimeMillis());
+    ServerName secondMasterAddress =
+      new ServerName("localhost", 2, System.currentTimeMillis());
 
     // Should not have a master yet
     DummyMaster ms1 = new DummyMaster();
@@ -177,8 +180,10 @@ public class TestActiveMasterManager {
    * @throws KeeperException
    */
   private void assertMaster(ZooKeeperWatcher zk,
-      HServerAddress expectedAddress) throws KeeperException {
-    HServerAddress readAddress = ZKUtil.getDataAsAddress(zk, zk.masterAddressZNode);
+      ServerName expectedAddress)
+  throws KeeperException {
+    ServerName readAddress =
+      new ServerName(Bytes.toString(ZKUtil.getData(zk, zk.masterAddressZNode)));
     assertNotNull(readAddress);
     assertTrue(expectedAddress.equals(readAddress));
   }
@@ -188,8 +193,7 @@ public class TestActiveMasterManager {
     ActiveMasterManager manager;
     boolean isActiveMaster;
 
-    public WaitToBeMasterThread(ZooKeeperWatcher zk,
-        HServerAddress address) {
+    public WaitToBeMasterThread(ZooKeeperWatcher zk, ServerName address) {
       this.manager = new ActiveMasterManager(zk, address,
           new DummyMaster());
       isActiveMaster = false;
@@ -248,7 +252,7 @@ public class TestActiveMasterManager {
     }
 
     @Override
-    public String getServerName() {
+    public ServerName getServerName() {
       return null;
     }
 

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java?rev=1097275&r1=1097274&r2=1097275&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java Wed Apr 27 23:12:42 2011
@@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.HTableDes
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
 import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.catalog.CatalogTracker;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.executor.ExecutorService;
@@ -84,8 +85,8 @@ public class TestCatalogJanitor {
     }
 
     @Override
-    public String getServerName() {
-      return "mockserver.example.org,1234,-1L";
+    public ServerName getServerName() {
+      return new ServerName("mockserver.example.org", 1234, -1L);
     }
 
     @Override
@@ -161,7 +162,7 @@ public class TestCatalogJanitor {
     }
 
     @Override
-    public String getServerName() {
+    public ServerName getServerName() {
       return null;
     }
 

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java?rev=1097275&r1=1097274&r2=1097275&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java Wed Apr 27 23:12:42 2011
@@ -19,6 +19,8 @@
  */
 package org.apache.hadoop.hbase.master;
 
+import java.net.InetAddress;
+
 import junit.framework.Assert;
 
 import org.apache.commons.logging.Log;
@@ -26,9 +28,8 @@ import org.apache.commons.logging.LogFac
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ClockOutOfSyncException;
 import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HServerAddress;
-import org.apache.hadoop.hbase.HServerInfo;
 import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.catalog.CatalogTracker;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.junit.Test;
@@ -53,7 +54,7 @@ public class TestClockSkewDetection {
       }
 
       @Override
-      public String getServerName() {
+      public ServerName getServerName() {
         return null;
       }
 
@@ -72,22 +73,20 @@ public class TestClockSkewDetection {
 
       @Override
       public void stop(String why) {
-      }}, null, null);
+      }}, null);
 
     LOG.debug("regionServerStartup 1");
-    HServerInfo hsi1 = new HServerInfo(new HServerAddress("example.org:1234"),
-        System.currentTimeMillis(), -1, "example.com");
-    sm.regionServerStartup(hsi1, System.currentTimeMillis());
+    InetAddress ia1 = InetAddress.getLocalHost();
+    sm.regionServerStartup(ia1, 1234, -1, System.currentTimeMillis());
 
     long maxSkew = 30000;
 
     try {
       LOG.debug("regionServerStartup 2");
-      HServerInfo hsi2 = new HServerInfo(new HServerAddress("example.org:1235"),
-        System.currentTimeMillis(), -1, "example.com");
-      sm.regionServerStartup(hsi2, System.currentTimeMillis() - maxSkew * 2);
+      InetAddress ia2 = InetAddress.getLocalHost();
+      sm.regionServerStartup(ia2, 1235, -1, System.currentTimeMillis() - maxSkew * 2);
       Assert.assertTrue("HMaster should have thrown an ClockOutOfSyncException "
-          + "but didn't.", false);
+        + "but didn't.", false);
     } catch(ClockOutOfSyncException e) {
       //we want an exception
       LOG.info("Recieved expected exception: "+e);

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/master/TestDeadServer.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/master/TestDeadServer.java?rev=1097275&r1=1097274&r2=1097275&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/master/TestDeadServer.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/master/TestDeadServer.java Wed Apr 27 23:12:42 2011
@@ -20,34 +20,25 @@ package org.apache.hadoop.hbase.master;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
+import org.apache.hadoop.hbase.ServerName;
 import org.junit.Test;
 
 
 public class TestDeadServer {
   @Test public void testIsDead() {
-    DeadServer ds = new DeadServer(2);
-    final String hostname123 = "127.0.0.1,123,3";
-    assertFalse(ds.isDeadServer(hostname123, false));
-    assertFalse(ds.isDeadServer(hostname123, true));
+    DeadServer ds = new DeadServer();
+    final ServerName hostname123 = new ServerName("127.0.0.1", 123, 3L);
     ds.add(hostname123);
-    assertTrue(ds.isDeadServer(hostname123, false));
-    assertFalse(ds.isDeadServer("127.0.0.1:1", true));
-    assertFalse(ds.isDeadServer("127.0.0.1:1234", true));
-    assertTrue(ds.isDeadServer("127.0.0.1:123", true));
     assertTrue(ds.areDeadServersInProgress());
     ds.finish(hostname123);
     assertFalse(ds.areDeadServersInProgress());
-    final String hostname1234 = "127.0.0.2,1234,4";
+    final ServerName hostname1234 = new ServerName("127.0.0.2", 1234, 4L);
     ds.add(hostname1234);
-    assertTrue(ds.isDeadServer(hostname123, false));
-    assertTrue(ds.isDeadServer(hostname1234, false));
     assertTrue(ds.areDeadServersInProgress());
     ds.finish(hostname1234);
     assertFalse(ds.areDeadServersInProgress());
-    final String hostname12345 = "127.0.0.2,12345,4";
+    final ServerName hostname12345 = new ServerName("127.0.0.2", 12345, 4L);
     ds.add(hostname12345);
-    assertTrue(ds.isDeadServer(hostname1234, false));
-    assertTrue(ds.isDeadServer(hostname12345, false));
     assertTrue(ds.areDeadServersInProgress());
     ds.finish(hostname12345);
     assertFalse(ds.areDeadServersInProgress());
@@ -55,14 +46,14 @@ public class TestDeadServer {
     // Already dead =       127.0.0.1,9090,112321
     // Coming back alive =  127.0.0.1,9090,223341
 
-    final String deadServer = "127.0.0.1,9090,112321";
+    final ServerName deadServer = new ServerName("127.0.0.1", 9090, 112321L);
     assertFalse(ds.cleanPreviousInstance(deadServer));
     ds.add(deadServer);
     assertTrue(ds.isDeadServer(deadServer));
-    final String deadServerHostComingAlive = "127.0.0.1,9090,112321";
+    final ServerName deadServerHostComingAlive =
+      new ServerName("127.0.0.1", 9090, 112321L);
     assertTrue(ds.cleanPreviousInstance(deadServerHostComingAlive));
     assertFalse(ds.isDeadServer(deadServer));
     assertFalse(ds.cleanPreviousInstance(deadServerHostComingAlive));
-
   }
 }
\ No newline at end of file

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java?rev=1097275&r1=1097274&r2=1097275&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java Wed Apr 27 23:12:42 2011
@@ -19,11 +19,12 @@
  */
 package org.apache.hadoop.hbase.master;
 
-import static org.apache.hadoop.hbase.zookeeper.ZKSplitLog.Counters.*;
-
+import static org.apache.hadoop.hbase.zookeeper.ZKSplitLog.Counters.tot_wkr_final_transistion_failed;
+import static org.apache.hadoop.hbase.zookeeper.ZKSplitLog.Counters.tot_wkr_task_acquired;
+import static org.apache.hadoop.hbase.zookeeper.ZKSplitLog.Counters.tot_wkr_task_err;
+import static org.apache.hadoop.hbase.zookeeper.ZKSplitLog.Counters.tot_wkr_task_resigned;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.util.Iterator;
@@ -62,8 +63,8 @@ import org.apache.hadoop.hbase.zookeeper
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
 import org.apache.zookeeper.KeeperException;
-import org.junit.Before;
 import org.junit.After;
+import org.junit.Before;
 import org.junit.Test;
 
 public class TestDistributedLogSplitting {
@@ -156,7 +157,7 @@ public class TestDistributedLogSplitting
     HRegionServer hrs = rsts.get(0).getRegionServer();
     Path rootdir = FSUtils.getRootDir(conf);
     final Path logDir = new Path(rootdir,
-        HLog.getHLogDirectoryName(hrs.getServerName()));
+        HLog.getHLogDirectoryName(hrs.getServerName().toString()));
 
     installTable(new ZooKeeperWatcher(conf, "table-creation", null),
         "table", "family", 40);
@@ -205,7 +206,7 @@ public class TestDistributedLogSplitting
     HRegionServer hrs = rsts.get(0).getRegionServer();
     Path rootdir = FSUtils.getRootDir(conf);
     final Path logDir = new Path(rootdir,
-        HLog.getHLogDirectoryName(hrs.getServerName()));
+        HLog.getHLogDirectoryName(hrs.getServerName().toString()));
 
     installTable(new ZooKeeperWatcher(conf, "table-creation", null),
         "table", "family", 40);
@@ -253,11 +254,10 @@ public class TestDistributedLogSplitting
     HRegionServer hrs = rsts.get(0).getRegionServer();
     Path rootdir = FSUtils.getRootDir(conf);
     final Path logDir = new Path(rootdir,
-        HLog.getHLogDirectoryName(hrs.getServerName()));
+        HLog.getHLogDirectoryName(hrs.getServerName().toString()));
 
     installTable(new ZooKeeperWatcher(conf, "table-creation", null),
         "table", "family", 40);
-    byte[] table = Bytes.toBytes("table");
     makeHLog(hrs.getWAL(), hrs.getOnlineRegions(), "table",
         NUM_LOG_LINES, 100);
 
@@ -400,11 +400,6 @@ public class TestDistributedLogSplitting
     master.assignmentManager.waitUntilNoRegionsInTransition(60000);
   }
 
-  private void blockUntilRIT(ZooKeeperWatcher zkw)
-  throws KeeperException, InterruptedException {
-    ZKAssign.blockUntilRIT(zkw);
-  }
-
   private void putData(HRegion region, byte[] startRow, int numRows, byte [] qf,
       byte [] ...families)
   throws IOException {



Mime
View raw message