hbase-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From te...@apache.org
Subject svn commit: r1204611 [2/2] - in /hbase/trunk: ./ src/main/java/org/apache/hadoop/hbase/ src/main/java/org/apache/hadoop/hbase/executor/ src/main/java/org/apache/hadoop/hbase/ipc/ src/main/java/org/apache/hadoop/hbase/master/ src/main/java/org/apache/ha...
Date Mon, 21 Nov 2011 17:29:33 GMT
Added: hbase/trunk/src/main/java/org/apache/hadoop/hbase/zookeeper/SchemaChangeTracker.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/zookeeper/SchemaChangeTracker.java?rev=1204611&view=auto
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/zookeeper/SchemaChangeTracker.java (added)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/zookeeper/SchemaChangeTracker.java Mon Nov 21 17:29:32 2011
@@ -0,0 +1,476 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.zookeeper;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.Abortable;
+import org.apache.hadoop.hbase.monitoring.MonitoredTask;
+import org.apache.hadoop.hbase.monitoring.TaskMonitor;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.RegionServerServices;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Writables;
+import org.apache.zookeeper.KeeperException;
+import org.apache.hadoop.hbase.util.Writables;
+
+import org.apache.hadoop.io.Writable;
+
+import java.io.*;
+import java.util.List;
+
+/**
+ * Region server schema change tracker. RS uses this tracker to keep track of
+ * alter schema requests from master and updates the status once the schema change
+ * is complete.
+ */
+public class SchemaChangeTracker extends ZooKeeperNodeTracker {
+  public static final Log LOG = LogFactory.getLog(SchemaChangeTracker.class);
+  private RegionServerServices regionServer = null;
+  private volatile int sleepTimeMillis = 0;
+
+
+  /**
+   * Constructs a new ZK node tracker.
+   * <p/>
+   * <p>After construction, use {@link #start} to kick off tracking.
+   *
+   * @param watcher
+   * @param node
+   * @param abortable
+   */
+  public SchemaChangeTracker(ZooKeeperWatcher watcher,
+                             Abortable abortable,
+                             RegionServerServices regionServer) {
+    super(watcher, watcher.schemaZNode, abortable);
+    this.regionServer = regionServer;
+  }
+
+  @Override
+  public void start() {
+    try {
+      watcher.registerListener(this);
+      ZKUtil.listChildrenAndWatchThem(watcher, node);
+      // Clean-up old in-process schema changes for this RS now?
+    } catch (KeeperException e) {
+      LOG.error("RegionServer SchemaChangeTracker startup failed with " +
+          "KeeperException.", e);
+    }
+  }
+
+
+  /**
+   * This event will be triggered whenever new schema change request is processed by the
+   * master. The path will be of the format /hbase/schema/<table name>
+   * @param path full path of the node whose children have changed
+   */
+  @Override
+  public void nodeChildrenChanged(String path) {
+    LOG.debug("NodeChildrenChanged. Path = " + path);
+    if (path.equals(watcher.schemaZNode)) {
+      try {
+        List<String> tables =
+          ZKUtil.listChildrenAndWatchThem(watcher, watcher.schemaZNode);
+        LOG.debug("RS.SchemaChangeTracker: " +
+            "Current list of tables with schema change = " + tables);
+        if (tables != null) {
+          handleSchemaChange(tables);
+        } else {
+          LOG.error("No tables found for schema change event." +
+              " Skipping instant schema refresh");
+        }
+      } catch (KeeperException ke) {
+        String errmsg = "KeeperException while handling nodeChildrenChanged for path = "
+            + path + " Cause = " + ke.getCause();
+        LOG.error(errmsg, ke);
+        TaskMonitor.get().createStatus(errmsg);
+      }
+    }
+  }
+
+  private void handleSchemaChange(List<String> tables) {
+    for (String tableName : tables) {
+      if (tableName != null) {
+        LOG.debug("Processing schema change with status for table = " + tableName);
+        handleSchemaChange(tableName);
+      }
+    }
+  }
+
+  private void handleSchemaChange(String tableName) {
+    int refreshedRegionsCount = 0, onlineRegionsCount = 0;
+    MonitoredTask status = null;
+    try {
+      List<HRegion> onlineRegions =
+          regionServer.getOnlineRegions(Bytes.toBytes(tableName));
+      if (onlineRegions != null && !onlineRegions.isEmpty()) {
+        status = TaskMonitor.get().createStatus("Region server "
+             + regionServer.getServerName().getServerName()
+             + " handling schema change for table = " + tableName
+             + " number of online regions = " + onlineRegions.size());
+        onlineRegionsCount = onlineRegions.size();
+        createStateNode(tableName, onlineRegions.size());
+        for (HRegion hRegion : onlineRegions) {
+          regionServer.refreshRegion(hRegion);
+          refreshedRegionsCount ++;
+        }
+        SchemaAlterStatus alterStatus = getSchemaAlterStatus(tableName);
+        alterStatus.update(SchemaAlterStatus.AlterState.SUCCESS, refreshedRegionsCount);
+        updateSchemaChangeStatus(tableName, alterStatus);
+        String msg = "Refresh schema completed for table name = " + tableName
+        + " server = " + regionServer.getServerName().getServerName()
+        + " online Regions = " + onlineRegions.size()
+        + " refreshed Regions = " + refreshedRegionsCount;
+        LOG.debug(msg);
+        status.setStatus(msg);
+      } else {
+        LOG.debug("Server " + regionServer.getServerName().getServerName()
+         + " has no online regions for table = " + tableName
+         + " Ignoring the schema change request");
+      }
+    } catch (IOException ioe) {
+      reportAndLogSchemaRefreshError(tableName, onlineRegionsCount,
+          refreshedRegionsCount, ioe, status);
+    } catch (KeeperException ke) {
+      reportAndLogSchemaRefreshError(tableName, onlineRegionsCount,
+          refreshedRegionsCount, ke, status);
+    }
+  }
+
+  private int getZKNodeVersion(String nodePath) throws KeeperException {
+    return ZKUtil.checkExists(this.watcher, nodePath);
+  }
+
+  private void reportAndLogSchemaRefreshError(String tableName,
+                                              int onlineRegionsCount,
+                                              int refreshedRegionsCount,
+                                              Throwable exception,
+                                              MonitoredTask status) {
+    try {
+      String errmsg =
+          " Region Server " + regionServer.getServerName().getServerName()
+              + " failed during schema change process. Cause = "
+              + exception.getCause()
+              + " Number of onlineRegions = " + onlineRegionsCount
+              + " Processed regions = " + refreshedRegionsCount;
+      SchemaAlterStatus alterStatus = getSchemaAlterStatus(tableName);
+      alterStatus.update(SchemaAlterStatus.AlterState.FAILURE,
+          refreshedRegionsCount, errmsg);
+      String nodePath = getSchemaChangeNodePathForTableAndServer(tableName,
+              regionServer.getServerName().getServerName());
+      ZKUtil.updateExistingNodeData(this.watcher, nodePath,
+          Writables.getBytes(alterStatus), getZKNodeVersion(nodePath));
+      LOG.info("reportAndLogSchemaRefreshError() " +
+          " Updated child ZKNode with SchemaAlterStatus = "
+          + alterStatus + " for table = " + tableName);
+      if (status == null) {
+        status = TaskMonitor.get().createStatus(errmsg);
+      } else {
+        status.setStatus(errmsg);
+      }
+    } catch (KeeperException e) {
+    // Retry ?
+      String errmsg = "KeeperException while updating the schema change node with "
+        + "error status for table = "
+        + tableName + " server = "
+        + regionServer.getServerName().getServerName()
+        + " Cause = " + e.getCause();
+      LOG.error(errmsg, e);
+      TaskMonitor.get().createStatus(errmsg);
+    } catch(IOException ioe) {
+      // retry ??
+      String errmsg = "IOException while updating the schema change node with "
+        + "server name for table = "
+        + tableName + " server = "
+        + regionServer.getServerName().getServerName()
+        + " Cause = " + ioe.getCause();
+      TaskMonitor.get().createStatus(errmsg);
+      LOG.error(errmsg, ioe);
+    }
+  }
+
+
+  private void createStateNode(String tableName, int numberOfOnlineRegions)
+      throws IOException {
+    SchemaAlterStatus sas =
+        new SchemaAlterStatus(regionServer.getServerName().getServerName(),
+            numberOfOnlineRegions);
+    LOG.debug("Creating Schema Alter State node = " + sas);
+    try {
+      ZKUtil.createSetData(this.watcher,
+          getSchemaChangeNodePathForTableAndServer(tableName,
+                regionServer.getServerName().getServerName()),
+                Writables.getBytes(sas));
+    } catch (KeeperException ke) {
+      String errmsg = "KeeperException while creating the schema change node with "
+          + "server name for table = "
+          + tableName + " server = "
+          + regionServer.getServerName().getServerName()
+          + " Message = " + ke.getCause();
+      LOG.error(errmsg, ke);
+      TaskMonitor.get().createStatus(errmsg);
+    }
+
+  }
+
+  private SchemaAlterStatus getSchemaAlterStatus(String tableName)
+      throws KeeperException, IOException {
+    byte[] statusBytes = ZKUtil.getData(this.watcher,
+        getSchemaChangeNodePathForTableAndServer(tableName,
+            regionServer.getServerName().getServerName()));
+    if (statusBytes == null || statusBytes.length <= 0) {
+      return null;
+    }
+    SchemaAlterStatus sas = new SchemaAlterStatus();
+    Writables.getWritable(statusBytes, sas);
+    return sas;
+  }
+
+  private void updateSchemaChangeStatus(String tableName,
+                                     SchemaAlterStatus schemaAlterStatus)
+      throws KeeperException, IOException {
+    try {
+      if(sleepTimeMillis > 0) {
+        try {
+          LOG.debug("SchemaChangeTracker sleeping for "
+              + sleepTimeMillis);
+          Thread.sleep(sleepTimeMillis);
+        } catch (InterruptedException e) {
+          Thread.currentThread().interrupt();
+        }
+      }
+      ZKUtil.updateExistingNodeData(this.watcher,
+          getSchemaChangeNodePathForTableAndServer(tableName,
+              regionServer.getServerName().getServerName()),
+          Writables.getBytes(schemaAlterStatus), -1);
+      String msg = "Schema change tracker completed for table = " + tableName
+              + " status = " + schemaAlterStatus;
+      LOG.debug(msg);
+      TaskMonitor.get().createStatus(msg);
+    } catch (KeeperException.NoNodeException e) {
+      String errmsg = "KeeperException.NoNodeException while updating the schema "
+          + "change node with server name for table = "
+          + tableName + " server = "
+          + regionServer.getServerName().getServerName()
+          + " Cause = " + e.getCause();
+      TaskMonitor.get().createStatus(errmsg);
+      LOG.error(errmsg, e);
+    } catch (KeeperException e) {
+      // Retry ?
+      String errmsg = "KeeperException while updating the schema change node with "
+          + "server name for table = "
+          + tableName + " server = "
+          + regionServer.getServerName().getServerName()
+          + " Cause = " + e.getCause();
+      LOG.error(errmsg, e);
+      TaskMonitor.get().createStatus(errmsg);
+    } catch(IOException ioe) {
+      String errmsg = "IOException while updating the schema change node with "
+          + "server name for table = "
+          + tableName + " server = "
+          + regionServer.getServerName().getServerName()
+          + " Cause = " + ioe.getCause();
+      LOG.error(errmsg, ioe);
+      TaskMonitor.get().createStatus(errmsg);
+    }
+  }
+
+  private String getSchemaChangeNodePathForTable(String tableName) {
+    return ZKUtil.joinZNode(watcher.schemaZNode, tableName);
+  }
+
+  private String getSchemaChangeNodePathForTableAndServer(
+      String tableName, String regionServerName) {
+    return ZKUtil.joinZNode(getSchemaChangeNodePathForTable(tableName),
+        regionServerName);
+  }
+
+  public int getSleepTimeMillis() {
+    return sleepTimeMillis;
+  }
+
+  /**
+   * Set a sleep time in millis before this RS can update it's progress status.
+   * Used only for test cases to test complex test scenarios such as RS failures and
+   * RS exemption handling.
+   * @param sleepTimeMillis
+   */
+  public void setSleepTimeMillis(int sleepTimeMillis) {
+    this.sleepTimeMillis = sleepTimeMillis;
+  }
+
+  /**
+   * Check whether there are any schema change requests that are in progress now for the given table.
+   * We simply assume that a schema change is in progress if we see a ZK schema node this
+   * any table. We may revisit for fine grained checks such as check the current alter status
+   * et al, but it is not required now.
+   * @return
+   */
+  public boolean isSchemaChangeInProgress(String tableName) {
+    try {
+      List<String> schemaChanges = ZKUtil.listChildrenAndWatchThem(this.watcher,
+          watcher.schemaZNode);
+      if (schemaChanges != null) {
+        for (String alterTableName : schemaChanges) {
+          if (alterTableName.equals(tableName)) {
+            return true;
+          }
+        }
+        return false;
+      }
+    } catch (KeeperException ke) {
+      LOG.debug("isSchemaChangeInProgress. " +
+          "KeeperException while getting current schema change progress.");
+      return false;
+    }
+    return false;
+  }
+
+  /**
+   * Holds the current alter state for a table. Alter state includes the
+   * current alter status (INPROCESS, FAILURE, SUCCESS, or IGNORED, current RS
+   * host name, timestamp of alter request, number of online regions this RS has for
+   * the given table, number of processed regions and an errorCause in case
+   * if the RS failed during the schema change process.
+   *
+   * RS keeps track of schema change requests per table using the alter status and
+   * periodically updates the alter status based on schema change status.
+   */
+  public static class SchemaAlterStatus implements Writable {
+
+    public enum AlterState {
+      INPROCESS,        // Inprocess alter
+      SUCCESS,          // completed alter
+      FAILURE,          // failure alter
+      IGNORED           // Ignore the alter processing.
+    }
+
+    private AlterState currentAlterStatus;
+    // TimeStamp
+    private long stamp;
+    private int numberOfOnlineRegions;
+    private String errorCause = " ";
+    private String hostName;
+    private int numberOfRegionsProcessed = 0;
+
+    public SchemaAlterStatus() {
+
+    }
+
+    public SchemaAlterStatus(String hostName, int numberOfOnlineRegions) {
+      this.numberOfOnlineRegions = numberOfOnlineRegions;
+      this.stamp = System.currentTimeMillis();
+      this.currentAlterStatus = AlterState.INPROCESS;
+      //this.rsToProcess = activeHosts;
+      this.hostName = hostName;
+    }
+
+    public AlterState getCurrentAlterStatus() {
+      return currentAlterStatus;
+    }
+
+    public void setCurrentAlterStatus(AlterState currentAlterStatus) {
+      this.currentAlterStatus = currentAlterStatus;
+    }
+
+    public int getNumberOfOnlineRegions() {
+      return numberOfOnlineRegions;
+    }
+
+    public void setNumberOfOnlineRegions(int numberOfRegions) {
+      this.numberOfOnlineRegions = numberOfRegions;
+    }
+
+    public int getNumberOfRegionsProcessed() {
+      return numberOfRegionsProcessed;
+    }
+
+    public void setNumberOfRegionsProcessed(int numberOfRegionsProcessed) {
+      this.numberOfRegionsProcessed = numberOfRegionsProcessed;
+    }
+
+    public String getErrorCause() {
+      return errorCause;
+    }
+
+    public void setErrorCause(String errorCause) {
+      this.errorCause = errorCause;
+    }
+
+    public String getHostName() {
+      return hostName;
+    }
+
+    public void setHostName(String hostName) {
+      this.hostName = hostName;
+    }
+
+    public void update(AlterState state, int numberOfRegions, String errorCause) {
+      this.currentAlterStatus = state;
+      this.numberOfRegionsProcessed = numberOfRegions;
+      this.errorCause = errorCause;
+    }
+
+    public void update(AlterState state, int numberOfRegions) {
+      this.currentAlterStatus = state;
+      this.numberOfRegionsProcessed = numberOfRegions;
+    }
+
+    public void update(AlterState state) {
+      this.currentAlterStatus = state;
+    }
+
+    public void update(SchemaAlterStatus status) {
+      this.currentAlterStatus = status.getCurrentAlterStatus();
+      this.numberOfRegionsProcessed = status.getNumberOfRegionsProcessed();
+      this.errorCause = status.getErrorCause();
+    }
+
+    @Override
+    public void readFields(DataInput in) throws IOException {
+      currentAlterStatus = AlterState.valueOf(in.readUTF());
+      stamp = in.readLong();
+      numberOfOnlineRegions = in.readInt();
+      hostName = Bytes.toString(Bytes.readByteArray(in));
+      numberOfRegionsProcessed = in.readInt();
+      errorCause = Bytes.toString(Bytes.readByteArray(in));
+    }
+
+    @Override
+    public void write(DataOutput out) throws IOException {
+      out.writeUTF(currentAlterStatus.name());
+      out.writeLong(stamp);
+      out.writeInt(numberOfOnlineRegions);
+      Bytes.writeByteArray(out, Bytes.toBytes(hostName));
+      out.writeInt(numberOfRegionsProcessed);
+      Bytes.writeByteArray(out, Bytes.toBytes(errorCause));
+    }
+
+    @Override
+    public String toString() {
+      return
+         " state= " + currentAlterStatus
+        + ", ts= " + stamp
+        + ", number of online regions = " + numberOfOnlineRegions
+        + ", host= " + hostName + " processed regions = " + numberOfRegionsProcessed
+        + ", errorCause = " + errorCause;
+    }
+  }
+
+}

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java?rev=1204611&r1=1204610&r2=1204611&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java Mon Nov 21 17:29:32 2011
@@ -100,6 +100,8 @@ public class ZooKeeperWatcher implements
   public String clusterIdZNode;
   // znode used for log splitting work assignment
   public String splitLogZNode;
+  // znode used to record table schema changes
+  public String schemaZNode;
 
   // Certain ZooKeeper nodes need to be world-readable
   public static final ArrayList<ACL> CREATOR_ALL_AND_WORLD_READABLE =
@@ -162,6 +164,7 @@ public class ZooKeeperWatcher implements
       ZKUtil.createAndFailSilent(this, drainingZNode);
       ZKUtil.createAndFailSilent(this, tableZNode);
       ZKUtil.createAndFailSilent(this, splitLogZNode);
+      ZKUtil.createAndFailSilent(this, schemaZNode);
     } catch (KeeperException e) {
       throw new ZooKeeperConnectionException(
           prefix("Unexpected KeeperException creating base node"), e);
@@ -211,6 +214,8 @@ public class ZooKeeperWatcher implements
         conf.get("zookeeper.znode.clusterId", "hbaseid"));
     splitLogZNode = ZKUtil.joinZNode(baseZNode,
         conf.get("zookeeper.znode.splitlog", HConstants.SPLIT_LOGDIR_NAME));
+    schemaZNode = ZKUtil.joinZNode(baseZNode,
+                conf.get("zookeeper.znode.schema", "schema"));
   }
 
   /**

Modified: hbase/trunk/src/main/resources/hbase-default.xml
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/resources/hbase-default.xml?rev=1204611&r1=1204610&r2=1204611&view=diff
==============================================================================
--- hbase/trunk/src/main/resources/hbase-default.xml (original)
+++ hbase/trunk/src/main/resources/hbase-default.xml Mon Nov 21 17:29:32 2011
@@ -771,15 +771,39 @@
     </description>
   </property>
   <property>
-      <name>hbase.coprocessor.abortonerror</name>
-      <value>false</value>
-      <description>
-      Set to true to cause the hosting server (master or regionserver) to
-      abort if a coprocessor throws a Throwable object that is not IOException or
-      a subclass of IOException. Setting it to true might be useful in development
-      environments where one wants to terminate the server as soon as possible to
-      simplify coprocessor failure analysis.
-      </description>
+    <name>hbase.coprocessor.abortonerror</name>
+    <value>false</value>
+    <description>
+    Set to true to cause the hosting server (master or regionserver) to
+    abort if a coprocessor throws a Throwable object that is not IOException or
+    a subclass of IOException. Setting it to true might be useful in development
+    environments where one wants to terminate the server as soon as possible to
+    simplify coprocessor failure analysis.
+    </description>
+  </property>
+  <property>
+    <name>hbase.instant.schema.alter.enabled</name>
+    <value>false</value>
+    <description>Whether to handle alter schema change requests instantly.
+    If enabled, all schema change alter operations will be instant, as the master will not
+    explicitly unassign/assign the impacted regions and instead will rely on Region servers to
+    refresh their schema changes. If enabled, the schema alter requests will survive
+    master or RS failures.
+    </description>
+  </property>
+  <property>
+    <name>hbase.instant.schema.janitor.period</name>
+    <value>120000</value>
+    <description>The interval, in milliseconds, at which the Schema Janitor process
+    wakes up and sweeps all expired/failed schema change requests.
+    </description>
+  </property>
+  <property>
+    <name>hbase.instant.schema.alter.timeout</name>
+    <value>60000</value>
+    <description>Timeout, in milliseconds, after which any pending schema alter
+    request will be considered failed.
+    </description>
   </property>
   <property>
     <name>hbase.online.schema.update.enable</name>

Added: hbase/trunk/src/test/java/org/apache/hadoop/hbase/client/InstantSchemaChangeTestBase.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/client/InstantSchemaChangeTestBase.java?rev=1204611&view=auto
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/client/InstantSchemaChangeTestBase.java (added)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/client/InstantSchemaChangeTestBase.java Mon Nov 21 17:29:32 2011
@@ -0,0 +1,169 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.client;
+
+import static org.junit.Assert.assertEquals;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.LargeTests;
+import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.JVMClusterUtil;
+import org.apache.hadoop.hbase.zookeeper.MasterSchemaChangeTracker;
+import org.apache.zookeeper.KeeperException;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.experimental.categories.Category;
+
+@Category(LargeTests.class)
+public class InstantSchemaChangeTestBase {
+
+  final Log LOG = LogFactory.getLog(getClass());
+  protected final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  protected HBaseAdmin admin;
+  protected static MiniHBaseCluster miniHBaseCluster = null;
+  protected Configuration conf;
+  protected static MasterSchemaChangeTracker msct = null;
+
+  protected final byte [] row = Bytes.toBytes("row");
+  protected final byte [] qualifier = Bytes.toBytes("qualifier");
+  final byte [] value = Bytes.toBytes("value");
+
+  // NOTE(review): despite its name this is annotated @Before (not @BeforeClass),
+  // so a fresh mini cluster is started before EVERY test method. The name is
+  // kept to avoid breaking subclasses that may reference it.
+  @Before
+  public void setUpBeforeClass() throws Exception {
+    TEST_UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100);
+    TEST_UTIL.getConfiguration().setInt("hbase.client.pause", 250);
+    TEST_UTIL.getConfiguration().setInt("hbase.client.retries.number", 6);
+    TEST_UTIL.getConfiguration().setBoolean("hbase.instant.schema.alter.enabled", true);
+    TEST_UTIL.getConfiguration().setBoolean("hbase.online.schema.update.enable", true);
+    TEST_UTIL.getConfiguration().setInt("hbase.instant.schema.janitor.period", 10000);
+    TEST_UTIL.getConfiguration().setInt("hbase.instant.schema.alter.timeout", 30000);
+    //
+    miniHBaseCluster = TEST_UTIL.startMiniCluster(2,5);
+    msct = TEST_UTIL.getHBaseCluster().getMaster().getSchemaChangeTracker();
+    this.admin = new HBaseAdmin(TEST_UTIL.getConfiguration());
+
+  }
+
+  // Runs after every test (@After, not @AfterClass); see note on setUpBeforeClass.
+  @After
+  public void tearDownAfterClass() throws Exception {
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  /**
+   * Find the RS that is currently holding our online region.
+   * @param tableName table whose region we are looking for
+   * @return the region server with at least one online region for the table,
+   *         or null if no live RS holds one
+   */
+  protected HRegionServer findRSWithOnlineRegionFor(String tableName) {
+    List<JVMClusterUtil.RegionServerThread> rsThreads =
+        miniHBaseCluster.getLiveRegionServerThreads();
+    for (JVMClusterUtil.RegionServerThread rsT : rsThreads) {
+      HRegionServer rs = rsT.getRegionServer();
+      List<HRegion> regions = rs.getOnlineRegions(Bytes.toBytes(tableName));
+      if (regions != null && !regions.isEmpty()) {
+        return rs;
+      }
+    }
+    return null;
+  }
+
+  /** Waits up to 10 seconds for the schema change to run. */
+  protected void waitForSchemaChangeProcess(final String tableName)
+      throws KeeperException, InterruptedException {
+    waitForSchemaChangeProcess(tableName, 10000);
+  }
+
+  /**
+   * This a pretty low cost signalling mechanism. It is quite possible that we will
+   * miss out the ZK node creation signal as in some cases the schema change process
+   * happens rather quickly and our thread waiting for ZK node creation might wait forever.
+   * The fool-proof strategy would be to directly listen for ZK events.
+   * @param tableName table whose schema change we are waiting on
+   * @param waitTimeMills max wait in millis; values <= 0 fall back to 10s
+   * @throws KeeperException on ZK errors
+   * @throws InterruptedException if the join is interrupted
+   */
+  protected void waitForSchemaChangeProcess(final String tableName, final long waitTimeMills)
+      throws KeeperException, InterruptedException {
+    LOG.info("Waiting for ZK node creation for table = " + tableName);
+    final MasterSchemaChangeTracker msct =
+    TEST_UTIL.getHBaseCluster().getMaster().getSchemaChangeTracker();
+    final Runnable r = new Runnable() {
+      public void run() {
+        // Phase 1: wait for the schema change node to appear.
+        try {
+          while(!msct.doesSchemaChangeNodeExists(tableName)) {
+            try {
+              Thread.sleep(50);
+            } catch (InterruptedException e) {
+              Thread.currentThread().interrupt();
+            }
+          }
+        } catch (KeeperException ke) {
+          LOG.warn("KeeperException while waiting for schema change node "
+              + "creation for table = " + tableName, ke);
+        }
+        LOG.info("Waiting for ZK node deletion for table = " + tableName);
+        // Phase 2: wait for the node to be deleted (schema change complete).
+        try {
+          while(msct.doesSchemaChangeNodeExists(tableName)) {
+            try {
+              Thread.sleep(50);
+            } catch (InterruptedException e) {
+               Thread.currentThread().interrupt();
+            }
+          }
+        }  catch (KeeperException ke) {
+          LOG.warn("KeeperException while waiting for schema change node "
+              + "deletion for table = " + tableName, ke);
+        }
+      }
+    };
+    Thread t = new Thread(r);
+    // Daemon: if the join below times out the poller must not keep the JVM
+    // (or the mini cluster shutdown) hostage by spinning forever.
+    t.setDaemon(true);
+    t.start();
+    if (waitTimeMills > 0) {
+      t.join(waitTimeMills);
+    }  else {
+      t.join(10000);
+    }
+  }
+
+  /**
+   * Creates a table with a single CATALOG_FAMILY and asserts the table count
+   * went up by exactly one.
+   * @param tableName table to create
+   * @return the created HTable handle
+   * @throws IOException on create/list failures
+   */
+  protected HTable createTableAndValidate(String tableName) throws IOException {
+    conf = TEST_UTIL.getConfiguration();
+    LOG.info("Start createTableAndValidate()");
+    // NOTE(review): return value unused — presumably kept to fail fast if the
+    // master/tracker is not up yet; confirm before removing.
+    TEST_UTIL.getHBaseCluster().getMaster().getSchemaChangeTracker();
+    HTableDescriptor[] tables = admin.listTables();
+    int numTables = 0;
+    if (tables != null) {
+      numTables = tables.length;
+    }
+    HTable ht = TEST_UTIL.createTable(Bytes.toBytes(tableName),
+      HConstants.CATALOG_FAMILY);
+    tables = this.admin.listTables();
+    assertEquals(numTables + 1, tables.length);
+    LOG.info("created table = " + tableName);
+    return ht;
+  }
+
+}
\ No newline at end of file

Added: hbase/trunk/src/test/java/org/apache/hadoop/hbase/client/TestInstantSchemaChange.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/client/TestInstantSchemaChange.java?rev=1204611&view=auto
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/client/TestInstantSchemaChange.java (added)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/client/TestInstantSchemaChange.java Mon Nov 21 17:29:32 2011
@@ -0,0 +1,465 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.client;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.LargeTests;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.zookeeper.MasterSchemaChangeTracker;
+import org.apache.zookeeper.KeeperException;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(LargeTests.class)
+public class TestInstantSchemaChange extends InstantSchemaChangeTestBase {
+
+  final Log LOG = LogFactory.getLog(getClass());
+
+  /**
+   * Alter a table via modifyTable and verify that once the schema change
+   * ZK node is gone the new column family is immediately usable.
+   */
+  @Test
+  public void testInstantSchemaChangeForModifyTable() throws IOException,
+      KeeperException, InterruptedException {
+
+    String tableName = "testInstantSchemaChangeForModifyTable";
+    conf = TEST_UTIL.getConfiguration();
+    LOG.info("Start testInstantSchemaChangeForModifyTable()");
+    HTable ht = createTableAndValidate(tableName);
+
+    String newFamily = "newFamily";
+    HTableDescriptor htd = new HTableDescriptor(tableName);
+    htd.addFamily(new HColumnDescriptor(newFamily));
+
+    admin.modifyTable(Bytes.toBytes(tableName), htd);
+    waitForSchemaChangeProcess(tableName);
+    assertFalse(msct.doesSchemaChangeNodeExists(tableName));
+
+    // The new family must be writable and readable right away.
+    Put put1 = new Put(row);
+    put1.add(Bytes.toBytes(newFamily), qualifier, value);
+    ht.put(put1);
+
+    Get get1 = new Get(row);
+    get1.addColumn(Bytes.toBytes(newFamily), qualifier);
+    Result r = ht.get(get1);
+    byte[] tvalue = r.getValue(Bytes.toBytes(newFamily), qualifier);
+    // JUnit convention: expected value comes first.
+    assertEquals(0, Bytes.compareTo(value, tvalue));
+    LOG.info("END testInstantSchemaChangeForModifyTable()");
+  }
+
+  /**
+   * Add a column family via addColumn and verify it becomes usable once
+   * the schema change completes.
+   */
+  @Test
+  public void testInstantSchemaChangeForAddColumn() throws IOException,
+      KeeperException, InterruptedException {
+    LOG.info("Start testInstantSchemaChangeForAddColumn() ");
+    String tableName = "testSchemachangeForAddColumn";
+    HTable ht = createTableAndValidate(tableName);
+    String newFamily = "newFamily";
+    // Use the local variable instead of repeating the literal.
+    HColumnDescriptor hcd = new HColumnDescriptor(newFamily);
+
+    admin.addColumn(Bytes.toBytes(tableName), hcd);
+    waitForSchemaChangeProcess(tableName);
+    assertFalse(msct.doesSchemaChangeNodeExists(tableName));
+
+    Put put1 = new Put(row);
+    put1.add(Bytes.toBytes(newFamily), qualifier, value);
+    LOG.info("******** Put into new column family ");
+    ht.put(put1);
+
+    Get get1 = new Get(row);
+    get1.addColumn(Bytes.toBytes(newFamily), qualifier);
+    Result r = ht.get(get1);
+    byte[] tvalue = r.getValue(Bytes.toBytes(newFamily), qualifier);
+    LOG.info(" Value put = " + value + " value from table = " + tvalue);
+    assertEquals(0, Bytes.compareTo(value, tvalue));
+    LOG.info("End testInstantSchemaChangeForAddColumn() ");
+  }
+
+  /**
+   * Modify a column family and verify the change is reflected on every
+   * online region of the table.
+   */
+  @Test
+  public void testInstantSchemaChangeForModifyColumn() throws IOException,
+      KeeperException, InterruptedException {
+    LOG.info("Start testInstantSchemaChangeForModifyColumn() ");
+    String tableName = "testSchemachangeForModifyColumn";
+    createTableAndValidate(tableName);
+
+    HColumnDescriptor hcd = new HColumnDescriptor(HConstants.CATALOG_FAMILY);
+    hcd.setMaxVersions(99);
+    hcd.setBlockCacheEnabled(false);
+
+    admin.modifyColumn(Bytes.toBytes(tableName), hcd);
+    waitForSchemaChangeProcess(tableName);
+    assertFalse(msct.doesSchemaChangeNodeExists(tableName));
+
+    // Use tableName instead of repeating the string literal.
+    List<HRegion> onlineRegions
+        = miniHBaseCluster.getRegions(Bytes.toBytes(tableName));
+    for (HRegion onlineRegion : onlineRegions) {
+      HTableDescriptor htd = onlineRegion.getTableDesc();
+      HColumnDescriptor tableHcd = htd.getFamily(HConstants.CATALOG_FAMILY);
+      assertFalse(tableHcd.isBlockCacheEnabled());
+      assertEquals(99, tableHcd.getMaxVersions());
+    }
+    LOG.info("End testInstantSchemaChangeForModifyColumn() ");
+  }
+
+  /**
+   * Delete a column family and verify it disappears from the table
+   * descriptor once the schema change completes.
+   */
+  @Test
+  public void testInstantSchemaChangeForDeleteColumn() throws IOException,
+      KeeperException, InterruptedException {
+    LOG.info("Start testInstantSchemaChangeForDeleteColumn() ");
+    String tableName = "testSchemachangeForDeleteColumn";
+    int numTables = 0;
+    HTableDescriptor[] tables = admin.listTables();
+    if (tables != null) {
+      numTables = tables.length;
+    }
+
+    byte[][] FAMILIES = new byte[][] {
+      Bytes.toBytes("A"), Bytes.toBytes("B"), Bytes.toBytes("C") };
+
+    // The returned handle is not needed; we only read metadata below.
+    TEST_UTIL.createTable(Bytes.toBytes(tableName),
+      FAMILIES);
+    tables = this.admin.listTables();
+    assertEquals(numTables + 1, tables.length);
+    LOG.info("Table testSchemachangeForDeleteColumn created");
+
+    admin.deleteColumn(tableName, "C");
+
+    waitForSchemaChangeProcess(tableName);
+    assertFalse(msct.doesSchemaChangeNodeExists(tableName));
+    HTableDescriptor modifiedHtd = this.admin.getTableDescriptor(Bytes.toBytes(tableName));
+    HColumnDescriptor hcd = modifiedHtd.getFamily(Bytes.toBytes("C"));
+    assertTrue(hcd == null);
+    LOG.info("End testInstantSchemaChangeForDeleteColumn() ");
+  }
+
+  /**
+   * Schema changes on a disabled table must not go through the instant
+   * (ZK-based) schema change path: no schema change node is created.
+   */
+  @Test
+  public void testInstantSchemaChangeWhenTableIsNotEnabled() throws IOException,
+      KeeperException {
+    final String tableName = "testInstantSchemaChangeWhenTableIsDisabled";
+    conf = TEST_UTIL.getConfiguration();
+    LOG.info("Start testInstantSchemaChangeWhenTableIsDisabled()");
+    createTableAndValidate(tableName);
+    // Disable table (use tableName rather than a repeated literal).
+    admin.disableTable(tableName);
+    // perform schema changes
+    HColumnDescriptor hcd = new HColumnDescriptor("newFamily");
+    admin.addColumn(Bytes.toBytes(tableName), hcd);
+    MasterSchemaChangeTracker msct =
+        TEST_UTIL.getHBaseCluster().getMaster().getSchemaChangeTracker();
+    assertFalse(msct.doesSchemaChangeNodeExists(tableName));
+  }
+
+  /**
+   * Test that when concurrent alter requests are received for a table we don't miss any.
+   * @throws IOException
+   * @throws KeeperException
+   * @throws InterruptedException
+   */
+  @Test
+  public void testConcurrentInstantSchemaChangeForAddColumn() throws IOException,
+      KeeperException, InterruptedException {
+    final String tableName = "testConcurrentInstantSchemaChangeForModifyTable";
+    conf = TEST_UTIL.getConfiguration();
+    LOG.info("Start testConcurrentInstantSchemaChangeForModifyTable()");
+    HTable ht = createTableAndValidate(tableName);
+
+    Runnable run1 = new Runnable() {
+      public void run() {
+        HColumnDescriptor hcd = new HColumnDescriptor("family1");
+        try {
+          admin.addColumn(Bytes.toBytes(tableName), hcd);
+        } catch (IOException ioe) {
+          LOG.warn("addColumn(family1) failed", ioe);
+        }
+      }
+    };
+    Runnable run2 = new Runnable() {
+      public void run() {
+        HColumnDescriptor hcd = new HColumnDescriptor("family2");
+        try {
+          admin.addColumn(Bytes.toBytes(tableName), hcd);
+        } catch (IOException ioe) {
+          LOG.warn("addColumn(family2) failed", ioe);
+        }
+      }
+    };
+
+    // NOTE(review): run() is invoked directly, so both alters execute
+    // sequentially on this thread, not concurrently — TODO confirm intent.
+    run1.run();
+    // We have to add a sleep here as in concurrent scenarios the HTD update
+    // in HDFS fails and returns with null HTD. This needs to be investigated,
+    // but it doesn't impact the instant alter functionality in any way.
+    Thread.sleep(100);
+    run2.run();
+
+    waitForSchemaChangeProcess(tableName);
+
+    Put put1 = new Put(row);
+    put1.add(Bytes.toBytes("family1"), qualifier, value);
+    ht.put(put1);
+
+    Get get1 = new Get(row);
+    get1.addColumn(Bytes.toBytes("family1"), qualifier);
+    Result r = ht.get(get1);
+    byte[] tvalue = r.getValue(Bytes.toBytes("family1"), qualifier);
+    assertEquals(0, Bytes.compareTo(value, tvalue));
+    // Give the second alter time to finish before touching family2.
+    Thread.sleep(10000);
+
+    Put put2 = new Put(row);
+    put2.add(Bytes.toBytes("family2"), qualifier, value);
+    ht.put(put2);
+
+    Get get2 = new Get(row);
+    get2.addColumn(Bytes.toBytes("family2"), qualifier);
+    Result r2 = ht.get(get2);
+    byte[] tvalue2 = r2.getValue(Bytes.toBytes("family2"), qualifier);
+    assertEquals(0, Bytes.compareTo(value, tvalue2));
+    LOG.info("END testConcurrentInstantSchemaChangeForModifyTable()");
+  }
+
+  /**
+   * The schema change request blocks while a LB run is in progress. This
+   * test validates this behavior.
+   * @throws IOException
+   * @throws InterruptedException
+   * @throws KeeperException
+   */
+  @Test
+  public void testConcurrentInstantSchemaChangeAndLoadBalancerRun() throws IOException,
+      InterruptedException, KeeperException {
+    final String tableName = "testInstantSchemaChangeWithLoadBalancerRunning";
+    conf = TEST_UTIL.getConfiguration();
+    LOG.info("Start testInstantSchemaChangeWithLoadBalancerRunning()");
+    final String newFamily = "newFamily";
+    HTable ht = createTableAndValidate(tableName);
+    final MasterSchemaChangeTracker msct =
+        TEST_UTIL.getHBaseCluster().getMaster().getSchemaChangeTracker();
+
+    Runnable balancer = new Runnable() {
+      public void run() {
+        // run the balancer now.
+        miniHBaseCluster.getMaster().balance();
+      }
+    };
+
+    Runnable schemaChanger = new Runnable() {
+      public void run() {
+        HColumnDescriptor hcd = new HColumnDescriptor(newFamily);
+        try {
+          admin.addColumn(Bytes.toBytes(tableName), hcd);
+        } catch (IOException ioe) {
+          LOG.warn("addColumn(" + newFamily + ") failed", ioe);
+        }
+      }
+    };
+
+    // Executed sequentially on this thread: balance first, then the alter,
+    // which is expected to block until the balancer run finishes.
+    balancer.run();
+    schemaChanger.run();
+    waitForSchemaChangeProcess(tableName, 40000);
+    assertFalse(msct.doesSchemaChangeNodeExists(tableName));
+
+    Put put1 = new Put(row);
+    put1.add(Bytes.toBytes(newFamily), qualifier, value);
+    LOG.info("******** Put into new column family ");
+    ht.put(put1);
+    ht.flushCommits();
+
+    LOG.info("******** Get from new column family ");
+    Get get1 = new Get(row);
+    get1.addColumn(Bytes.toBytes(newFamily), qualifier);
+    Result r = ht.get(get1);
+    byte[] tvalue = r.getValue(Bytes.toBytes(newFamily), qualifier);
+    LOG.info(" Value put = " + value + " value from table = " + tvalue);
+    assertEquals(0, Bytes.compareTo(value, tvalue));
+
+    LOG.info("End testInstantSchemaChangeWithLoadBalancerRunning() ");
+  }
+
+  /**
+   * This test validates two things. One is that the LoadBalancer does not run when a schema
+   * change process is in progress. The second thing is that it also checks that failed/expired
+   * schema changes are expired to unblock the load balancer run.
+   *
+   */
+  @Test (timeout=70000)
+  public void testLoadBalancerBlocksDuringSchemaChangeRequests() throws KeeperException,
+      IOException, InterruptedException {
+    LOG.info("Start testConcurrentLoadBalancerSchemaChangeRequests() ");
+    final MasterSchemaChangeTracker msct =
+        TEST_UTIL.getHBaseCluster().getMaster().getSchemaChangeTracker();
+    // Test that the load balancer does not run while an in-flight schema
+    // change operation is in progress.
+    // Simulate a new schema change request.
+    msct.createSchemaChangeNode("testLoadBalancerBlocks", 0);
+    // The schema change node is created.
+    assertTrue(msct.doesSchemaChangeNodeExists("testLoadBalancerBlocks"));
+    // Now, request an explicit LB run.
+
+    Runnable balancer1 = new Runnable() {
+      public void run() {
+        // run the balancer now.
+        miniHBaseCluster.getMaster().balance();
+      }
+    };
+    balancer1.run();
+
+    // Load balancer should not run now.
+    assertFalse(miniHBaseCluster.getMaster().isLoadBalancerRunning());
+    LOG.debug("testConcurrentLoadBalancerSchemaChangeRequests Asserted");
+    LOG.info("End testConcurrentLoadBalancerSchemaChangeRequests() ");
+  }
+
+  /**
+   * Test that instant schema change blocks while LB is running.
+   * @throws KeeperException
+   * @throws IOException
+   * @throws InterruptedException
+   */
+  @Test (timeout=10000)
+  public void testInstantSchemaChangeBlocksDuringLoadBalancerRun() throws KeeperException,
+      IOException, InterruptedException {
+    final MasterSchemaChangeTracker msct =
+        TEST_UTIL.getHBaseCluster().getMaster().getSchemaChangeTracker();
+
+    final String tableName = "testInstantSchemaChangeBlocksDuringLoadBalancerRun";
+    conf = TEST_UTIL.getConfiguration();
+    LOG.info("Start testInstantSchemaChangeBlocksDuringLoadBalancerRun()");
+    final String newFamily = "newFamily";
+    createTableAndValidate(tableName);
+
+    // Test that the schema change request does not run while an in-flight LB run
+    // is in progress.
+    // First, request an explicit LB run.
+
+    Runnable balancer1 = new Runnable() {
+      public void run() {
+        // run the balancer now.
+        miniHBaseCluster.getMaster().balance();
+      }
+    };
+
+    Runnable schemaChanger = new Runnable() {
+      public void run() {
+        HColumnDescriptor hcd = new HColumnDescriptor(newFamily);
+        try {
+          admin.addColumn(Bytes.toBytes(tableName), hcd);
+        } catch (IOException ioe) {
+          LOG.warn("addColumn(" + newFamily + ") failed", ioe);
+        }
+      }
+    };
+
+    Thread t1 = new Thread(balancer1);
+    Thread t2 = new Thread(schemaChanger);
+    t1.start();
+    t2.start();
+
+    // check that they both happen concurrently.
+    // NOTE(review): assertion failures inside this spawned thread do not
+    // fail the JUnit test; they only kill the checker thread — TODO rework
+    // to propagate failures back to the test thread.
+    Runnable balancerCheck = new Runnable() {
+      public void run() {
+        // check whether balancer is running.
+        while (!miniHBaseCluster.getMaster().isLoadBalancerRunning()) {
+          try {
+            Thread.sleep(10);
+          } catch (InterruptedException e) {
+            Thread.currentThread().interrupt();
+          }
+        }
+        try {
+          assertFalse(msct.doesSchemaChangeNodeExists("testSchemaChangeBlocks"));
+        } catch (KeeperException ke) {
+          LOG.warn("Schema change node check failed", ke);
+        }
+        LOG.debug("Load Balancer is now running or skipped");
+        while (miniHBaseCluster.getMaster().isLoadBalancerRunning()) {
+          try {
+            Thread.sleep(10);
+          } catch (InterruptedException e) {
+            Thread.currentThread().interrupt();
+          }
+        }
+        assertFalse(miniHBaseCluster.getMaster().isLoadBalancerRunning());
+        try {
+          assertTrue(msct.doesSchemaChangeNodeExists("testSchemaChangeBlocks"));
+        } catch (KeeperException ke) {
+          // Was silently swallowed; at least surface it in the test log.
+          LOG.warn("Schema change node check failed", ke);
+        }
+      }
+    };
+
+    Thread t = new Thread(balancerCheck);
+    t.start();
+    t.join(1000);
+    LOG.debug("testInstantSchemaChangeBlocksDuringLoadBalancerRun Asserted");
+    LOG.info("End testInstantSchemaChangeBlocksDuringLoadBalancerRun() ");
+  }
+
+  /**
+   * To test the schema janitor (that it cleans expired/failed schema alter attempts) we
+   * simply create a fake table (that doesn't exist, with a fake number of online regions) in ZK.
+   * This schema alter request will time out (after 30 seconds) and our janitor will clean it up.
+   * @throws IOException
+   * @throws KeeperException
+   * @throws InterruptedException
+   */
+  @Test
+  public void testInstantSchemaJanitor() throws IOException,
+      KeeperException, InterruptedException {
+    LOG.info("testInstantSchemaWithFailedExpiredOperations() ");
+    String fakeTableName = "testInstantSchemaWithFailedExpiredOperations";
+    MasterSchemaChangeTracker msct =
+        TEST_UTIL.getHBaseCluster().getMaster().getSchemaChangeTracker();
+    msct.createSchemaChangeNode(fakeTableName, 10);
+    LOG.debug(msct.getSchemaChangeNodePathForTable(fakeTableName)
+        + " created");
+    // 40s > the 30s alter timeout, so the janitor must have cleaned up.
+    Thread.sleep(40000);
+    assertFalse(msct.doesSchemaChangeNodeExists(fakeTableName));
+    LOG.debug(msct.getSchemaChangeNodePathForTable(fakeTableName)
+        + " deleted");
+    LOG.info("END testInstantSchemaWithFailedExpiredOperations() ");
+  }
+
+}
\ No newline at end of file

Added: hbase/trunk/src/test/java/org/apache/hadoop/hbase/client/TestInstantSchemaChangeFailover.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/client/TestInstantSchemaChangeFailover.java?rev=1204611&view=auto
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/client/TestInstantSchemaChangeFailover.java (added)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/client/TestInstantSchemaChangeFailover.java Mon Nov 21 17:29:32 2011
@@ -0,0 +1,303 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.LargeTests;
+import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.zookeeper.MasterSchemaChangeTracker;
+import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.apache.zookeeper.KeeperException;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(LargeTests.class)
+public class TestInstantSchemaChangeFailover {
+
+  final Log LOG = LogFactory.getLog(getClass());
+  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  private HBaseAdmin admin;
+  private static MiniHBaseCluster miniHBaseCluster = null;
+  private Configuration conf;
+  private ZooKeeperWatcher zkw;
+  private static MasterSchemaChangeTracker msct = null;
+
+  private final byte [] row = Bytes.toBytes("row");
+  private final byte [] qualifier = Bytes.toBytes("qualifier");
+  final byte [] value = Bytes.toBytes("value");
+
+  @Before
+  public void setUpBeforeClass() throws Exception {
+    TEST_UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100);
+    TEST_UTIL.getConfiguration().setInt("hbase.client.pause", 250);
+    TEST_UTIL.getConfiguration().setInt("hbase.client.retries.number", 6);
+    TEST_UTIL.getConfiguration().setBoolean("hbase.online.schema.update.enable", true);
+    TEST_UTIL.getConfiguration().setBoolean("hbase.instant.schema.alter.enabled", true);
+    TEST_UTIL.getConfiguration().setInt("hbase.instant.schema.janitor.period", 10000);
+    TEST_UTIL.getConfiguration().setInt("hbase.instant.schema.alter.timeout", 30000);
+    //
+    miniHBaseCluster = TEST_UTIL.startMiniCluster(2,5);
+    msct = TEST_UTIL.getHBaseCluster().getMaster().getSchemaChangeTracker();
+    this.admin = new HBaseAdmin(TEST_UTIL.getConfiguration());
+  }
+
+  @After
+  public void tearDownAfterClass() throws Exception {
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  /**
+   * This a pretty low cost signalling mechanism. It is quite possible that we will
+   * miss out the ZK node creation signal as in some cases the schema change process
+   * happens rather quickly and our thread waiting for ZK node creation might wait forver.
+   * The fool-proof strategy would be to directly listen for ZK events.
+   * @param tableName
+   * @throws KeeperException
+   * @throws InterruptedException
+   */
+  private void waitForSchemaChangeProcess(final String tableName)
+      throws KeeperException, InterruptedException {
+    LOG.info("Waiting for ZK node creation for table = " + tableName);
+    final MasterSchemaChangeTracker msct =
+    TEST_UTIL.getHBaseCluster().getMaster().getSchemaChangeTracker();
+    final Runnable r = new Runnable() {
+      public void run() {
+        try {
+          while(!msct.doesSchemaChangeNodeExists(tableName)) {
+            try {
+              Thread.sleep(20);
+            } catch (InterruptedException e) {
+            Thread.currentThread().interrupt();
+            }
+          }
+        } catch (KeeperException ke) {
+            ke.printStackTrace();
+        }
+
+        LOG.info("Waiting for ZK node deletion for table = " + tableName);
+        try {
+          while(msct.doesSchemaChangeNodeExists(tableName)) {
+            try {
+              Thread.sleep(20);
+            } catch (InterruptedException e) {
+              Thread.currentThread().interrupt();
+            }
+          }
+        } catch (KeeperException ke) {
+         ke.printStackTrace();
+        }
+      }
+    };
+    Thread t = new Thread(r);
+    t.start();
+    t.join(10000);
+  }
+
+
+  /**
+   * Kill a random RS and see that the schema change can succeed.
+   * @throws IOException
+   * @throws KeeperException
+   * @throws InterruptedException
+   */
+  @Test (timeout=50000)
+  public void testInstantSchemaChangeWhileRSCrash() throws IOException,
+      KeeperException, InterruptedException {
+    LOG.info("Start testInstantSchemaChangeWhileRSCrash()");
+    zkw = miniHBaseCluster.getMaster().getZooKeeperWatcher();
+
+    final String tableName = "TestRSCrashDuringSchemaChange";
+    HTable ht = createTableAndValidate(tableName);
+    HColumnDescriptor hcd = new HColumnDescriptor("family2");
+    admin.addColumn(Bytes.toBytes(tableName), hcd);
+
+    miniHBaseCluster.getRegionServer(0).abort("Killing while instant schema change");
+    // Let the dust settle down
+    Thread.sleep(10000);
+    waitForSchemaChangeProcess(tableName);
+    Put put2 = new Put(row);
+    put2.add(Bytes.toBytes("family2"), qualifier, value);
+    ht.put(put2);
+
+    Get get2 = new Get(row);
+    get2.addColumn(Bytes.toBytes("family2"), qualifier);
+    Result r2 = ht.get(get2);
+    byte[] tvalue2 = r2.getValue(Bytes.toBytes("family2"), qualifier);
+    int result2 = Bytes.compareTo(value, tvalue2);
+    assertEquals(result2, 0);
+    String nodePath = msct.getSchemaChangeNodePathForTable("TestRSCrashDuringSchemaChange");
+    assertTrue(ZKUtil.checkExists(zkw, nodePath) == -1);
+    LOG.info("result2 = " + result2);
+    LOG.info("end testInstantSchemaChangeWhileRSCrash()");
+  }
+
+  /**
+   * Randomly bring down/up RS servers while schema change is in progress. This test
+   * is same as the above one but the only difference is that we intent to kill and start
+   * new RS instances while a schema change is in progress.
+   * @throws IOException
+   * @throws KeeperException
+   * @throws InterruptedException
+   */
+  @Test (timeout=70000)
+  public void testInstantSchemaChangeWhileRandomRSCrashAndStart() throws IOException,
+      KeeperException, InterruptedException {
+    LOG.info("Start testInstantSchemaChangeWhileRandomRSCrashAndStart()");
+    miniHBaseCluster.getRegionServer(4).abort("Killing RS 4");
+    // Start a new RS before schema change .
+    // Commenting the start RS as it is failing with DFS user permission NPE.
+    //miniHBaseCluster.startRegionServer();
+
+    // Let the dust settle
+    Thread.sleep(10000);
+    final String tableName = "testInstantSchemaChangeWhileRandomRSCrashAndStart";
+    HTable ht = createTableAndValidate(tableName);
+    HColumnDescriptor hcd = new HColumnDescriptor("family2");
+    admin.addColumn(Bytes.toBytes(tableName), hcd);
+    // Kill 2 RS now.
+    miniHBaseCluster.getRegionServer(2).abort("Killing RS 2");
+    // Let the dust settle
+    Thread.sleep(10000);
+    // We will be left with only one RS.
+    waitForSchemaChangeProcess(tableName);
+    assertFalse(msct.doesSchemaChangeNodeExists(tableName));
+    Put put2 = new Put(row);
+    put2.add(Bytes.toBytes("family2"), qualifier, value);
+    ht.put(put2);
+
+    Get get2 = new Get(row);
+    get2.addColumn(Bytes.toBytes("family2"), qualifier);
+    Result r2 = ht.get(get2);
+    byte[] tvalue2 = r2.getValue(Bytes.toBytes("family2"), qualifier);
+    int result2 = Bytes.compareTo(value, tvalue2);
+    assertEquals(result2, 0);
+    LOG.info("result2 = " + result2);
+    LOG.info("end testInstantSchemaChangeWhileRandomRSCrashAndStart()");
+  }
+
+  /**
+   * Test scenario where primary master is brought down while processing an
+   * alter request. This is harder one as it is very difficult the time this.
+   * @throws IOException
+   * @throws KeeperException
+   * @throws InterruptedException
+   */
+
+  @Test (timeout=50000)
+  public void testInstantSchemaChangeWhileMasterFailover() throws IOException,
+      KeeperException, InterruptedException {
+    LOG.info("Start testInstantSchemaChangeWhileMasterFailover()");
+    //Thread.sleep(5000);
+
+    final String tableName = "testInstantSchemaChangeWhileMasterFailover";
+    HTable ht = createTableAndValidate(tableName);
+    HColumnDescriptor hcd = new HColumnDescriptor("family2");
+    admin.addColumn(Bytes.toBytes(tableName), hcd);
+    // Kill primary master now.
+    Thread.sleep(50);
+    miniHBaseCluster.getMaster().abort("Aborting master now", new Exception("Schema exception"));
+
+    // It may not be possible for us to check the schema change status
+    // using waitForSchemaChangeProcess as our ZK session in MasterSchemachangeTracker will be
+    // lost when master dies and hence may not be accurate. So relying on old-fashioned
+    // sleep here.
+    Thread.sleep(25000);
+    Put put2 = new Put(row);
+    put2.add(Bytes.toBytes("family2"), qualifier, value);
+    ht.put(put2);
+
+    Get get2 = new Get(row);
+    get2.addColumn(Bytes.toBytes("family2"), qualifier);
+    Result r2 = ht.get(get2);
+    byte[] tvalue2 = r2.getValue(Bytes.toBytes("family2"), qualifier);
+    int result2 = Bytes.compareTo(value, tvalue2);
+    assertEquals(result2, 0);
+    LOG.info("result2 = " + result2);
+    LOG.info("end testInstantSchemaChangeWhileMasterFailover()");
+  }
+
+  /**
+   * TEst the master fail over during a schema change request in ZK.
+   * We create a fake schema change request in ZK and abort the primary master
+   * mid-flight to simulate a master fail over scenario during a mid-flight
+   * schema change process. The new master's schema janitor will eventually
+   * cleanup this fake request after time out.
+   * @throws IOException
+   * @throws KeeperException
+   * @throws InterruptedException
+   */
+  @Test
+  public void testInstantSchemaOperationsInZKForMasterFailover() throws IOException,
+      KeeperException, InterruptedException {
+    LOG.info("testInstantSchemaOperationsInZKForMasterFailover() ");
+    String tableName = "testInstantSchemaOperationsInZKForMasterFailover";
+
+    conf = TEST_UTIL.getConfiguration();
+    MasterSchemaChangeTracker activesct =
+    TEST_UTIL.getHBaseCluster().getMaster().getSchemaChangeTracker();
+    activesct.createSchemaChangeNode(tableName, 10);
+    LOG.debug(activesct.getSchemaChangeNodePathForTable(tableName)
+        + " created");
+    assertTrue(activesct.doesSchemaChangeNodeExists(tableName));
+    // Kill primary master now.
+    miniHBaseCluster.getMaster().abort("Aborting master now", new Exception("Schema exception"));
+    // wait for 50 secs. This is so that our schema janitor from fail-over master will kick-in and
+    // cleanup this failed/expired schema change request.
+    Thread.sleep(50000);
+    MasterSchemaChangeTracker newmsct = miniHBaseCluster.getMaster().getSchemaChangeTracker();
+    assertFalse(newmsct.doesSchemaChangeNodeExists(tableName));
+    LOG.debug(newmsct.getSchemaChangeNodePathForTable(tableName)
+        + " deleted");
+    LOG.info("END testInstantSchemaOperationsInZKForMasterFailover() ");
+  }
+
+  private HTable createTableAndValidate(String tableName) throws IOException {
+    conf = TEST_UTIL.getConfiguration();
+    LOG.info("Start createTableAndValidate()");
+    HTableDescriptor[] tables = admin.listTables();
+    int numTables = 0;
+    if (tables != null) {
+      numTables = tables.length;
+    }
+    HTable ht = TEST_UTIL.createTable(Bytes.toBytes(tableName),
+      HConstants.CATALOG_FAMILY);
+    tables = this.admin.listTables();
+    assertEquals(numTables + 1, tables.length);
+    LOG.info("created table = " + tableName);
+    return ht;
+  }
+
+}
+
+

Added: hbase/trunk/src/test/java/org/apache/hadoop/hbase/client/TestInstantSchemaChangeSplit.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/client/TestInstantSchemaChangeSplit.java?rev=1204611&view=auto
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/client/TestInstantSchemaChangeSplit.java (added)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/client/TestInstantSchemaChangeSplit.java Mon Nov 21 17:29:32 2011
@@ -0,0 +1,217 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.client;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.LargeTests;
+import org.apache.hadoop.hbase.io.hfile.Compression;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.zookeeper.MasterSchemaChangeTracker;
+import org.apache.zookeeper.KeeperException;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(LargeTests.class)
+public class TestInstantSchemaChangeSplit extends InstantSchemaChangeTestBase {
+
+  final Log LOG = LogFactory.getLog(getClass());
+
+  /**
+   * The objective of the following test is to validate that schema exclusions happen properly.
+   * When a region server dies mid-flight during a schema refresh, we exclude all online
+   * regions hosted by that RS, as well as the RS itself, from the schema change process.
+   *
+   * @throws IOException
+   * @throws KeeperException
+   * @throws InterruptedException
+   */
+  @Test
+  public void testInstantSchemaChangeExclusions() throws IOException,
+      KeeperException, InterruptedException {
+    MasterSchemaChangeTracker msct =
+    TEST_UTIL.getHBaseCluster().getMaster().getSchemaChangeTracker();
+    LOG.info("Start testInstantSchemaChangeExclusions() ");
+    String tableName = "testInstantSchemaChangeExclusions";
+    HTable ht = createTableAndValidate(tableName);
+
+    // Prepare an altered column family: bump versions, disable block cache.
+    HColumnDescriptor hcd = new HColumnDescriptor(HConstants.CATALOG_FAMILY);
+    hcd.setMaxVersions(99);
+    hcd.setBlockCacheEnabled(false);
+
+    HRegionServer hrs = findRSWithOnlineRegionFor(tableName);
+    //miniHBaseCluster.getRegionServer(0).abort("killed for test");
+    // Kick off the schema change, then abort the RS hosting a region of this
+    // table so the master must exclude that RS (and its regions) mid-flight.
+    admin.modifyColumn(Bytes.toBytes(tableName), hcd);
+    hrs.abort("Aborting for tests");
+    hrs.getSchemaChangeTracker().setSleepTimeMillis(20000);
+
+    //admin.modifyColumn(Bytes.toBytes(tableName), hcd);
+    LOG.debug("Waiting for Schema Change process to complete");
+    waitForSchemaChangeProcess(tableName, 15000);
+    // Schema change node is cleaned up once the change completes.
+    // NOTE(review): JUnit convention is assertEquals(expected, actual); the
+    // arguments here are reversed, which garbles failure messages.
+    assertEquals(msct.doesSchemaChangeNodeExists(tableName), false);
+    // Sleep for some time so that our region is reassigned to some other RS
+    // by master.
+    Thread.sleep(10000);
+    List<HRegion> onlineRegions
+        = miniHBaseCluster.getRegions(Bytes.toBytes("testInstantSchemaChangeExclusions"));
+    assertTrue(!onlineRegions.isEmpty());
+    // Every reassigned region must carry the altered schema.
+    for (HRegion onlineRegion : onlineRegions) {
+      HTableDescriptor htd = onlineRegion.getTableDesc();
+      HColumnDescriptor tableHcd = htd.getFamily(HConstants.CATALOG_FAMILY);
+      assertTrue(tableHcd.isBlockCacheEnabled() == false);
+      assertEquals(tableHcd.getMaxVersions(), 99);
+    }
+    LOG.info("End testInstantSchemaChangeExclusions() ");
+
+ }
+
+  /**
+   * This test validates that when a schema change request fails on the
+   * RS side, we appropriately register the failure in the Master Schema change
+   * tracker's node as well as capture the error cause.
+   *
+   * Currently an alter request fails if the RS fails with an IO exception, e.g. due to
+   * a missing or incorrect codec. With instant schema change the same failure happens,
+   * and we register the failure with its associated cause and also update the
+   * monitor status appropriately.
+   *
+   * The region(s) will be orphaned in both the cases.
+   *
+   */
+  @Test
+  public void testInstantSchemaChangeWhileRSOpenRegionFailure() throws IOException,
+      KeeperException, InterruptedException {
+    MasterSchemaChangeTracker msct =
+    TEST_UTIL.getHBaseCluster().getMaster().getSchemaChangeTracker();
+
+    LOG.info("Start testInstantSchemaChangeWhileRSOpenRegionFailure() ");
+    String tableName = "testInstantSchemaChangeWhileRSOpenRegionFailure";
+    HTable ht = createTableAndValidate(tableName);
+
+    // Create 10 additional regions for this table.
+    // NOTE(review): the old comment said 100, but the call passes 10.
+    TEST_UTIL.createMultiRegions(conf, ht,
+      HConstants.CATALOG_FAMILY, 10);
+
+    // wait for all the regions to be assigned
+    Thread.sleep(10000);
+    List<HRegion> onlineRegions
+        = miniHBaseCluster.getRegions(
+        Bytes.toBytes("testInstantSchemaChangeWhileRSOpenRegionFailure"));
+    // NOTE(review): 'size' is never read afterwards — dead local.
+    int size = onlineRegions.size();
+    // Presumably regions end up orphaned (not online) after the failure below
+    // — logged here for diagnosis; TODO confirm the expected count.
+    LOG.info("Size of online regions = " + onlineRegions.size());
+
+    // SNAPPY codec support is assumed absent in the test environment, so the
+    // region reopen triggered by this alter should fail on the RS side.
+    HColumnDescriptor hcd = new HColumnDescriptor(HConstants.CATALOG_FAMILY);
+    hcd.setMaxVersions(99);
+    hcd.setBlockCacheEnabled(false);
+    hcd.setCompressionType(Compression.Algorithm.SNAPPY);
+
+    admin.modifyColumn(Bytes.toBytes(tableName), hcd);
+    Thread.sleep(100);
+
+    // NOTE(review): JUnit convention is assertEquals(expected, actual); the
+    // arguments in the asserts below are reversed.
+    assertEquals(msct.doesSchemaChangeNodeExists(tableName), true);
+    Thread.sleep(10000);
+    // Get the current alter status and validate that it is FAILURE with an
+    // appropriate error cause recorded.
+    MasterSchemaChangeTracker.MasterAlterStatus mas = msct.getMasterAlterStatus(tableName);
+    assertTrue(mas != null);
+    assertEquals(mas.getCurrentAlterStatus(),
+        MasterSchemaChangeTracker.MasterAlterStatus.AlterState.FAILURE);
+    assertTrue(mas.getErrorCause() != null);
+    LOG.info("End testInstantSchemaChangeWhileRSOpenRegionFailure() ");
+ }
+
+  /**
+   * Runs a schema change (add column family) and a table split back to back
+   * and verifies the schema change completes and the new family is usable
+   * for puts and gets afterwards.
+   */
+  @Test
+  public void testConcurrentInstantSchemaChangeAndSplit() throws IOException,
+  InterruptedException, KeeperException {
+    final String tableName = "testConcurrentInstantSchemaChangeAndSplit";
+    conf = TEST_UTIL.getConfiguration();
+    LOG.info("Start testConcurrentInstantSchemaChangeAndSplit()");
+    final String newFamily = "newFamily";
+    HTable ht = createTableAndValidate(tableName);
+    final MasterSchemaChangeTracker msct =
+      TEST_UTIL.getHBaseCluster().getMaster().getSchemaChangeTracker();
+
+    // Create 4 additional regions for this table.
+    // NOTE(review): the old comment said 10, but the call passes 4.
+    TEST_UTIL.createMultiRegions(conf, ht,
+        HConstants.CATALOG_FAMILY, 4);
+    int rowCount = TEST_UTIL.loadTable(ht, HConstants.CATALOG_FAMILY);
+    //assertRowCount(t, rowCount);
+
+    Runnable splitter = new Runnable() {
+      public void run() {
+        // run the splits now.
+        try {
+          LOG.info("Splitting table now ");
+          admin.split(Bytes.toBytes(tableName));
+        } catch (IOException e) {
+          e.printStackTrace();
+        } catch (InterruptedException e) {
+          e.printStackTrace();
+        }
+      }
+    };
+
+    Runnable schemaChanger = new Runnable() {
+      public void run() {
+        HColumnDescriptor hcd = new HColumnDescriptor(newFamily);
+        try {
+          admin.addColumn(Bytes.toBytes(tableName), hcd);
+        } catch (IOException ioe) {
+          ioe.printStackTrace();
+
+        }
+      }
+    };
+    // NOTE(review): calling run() directly executes both Runnables
+    // sequentially on this thread — despite the test name, nothing runs
+    // concurrently. Wrapping these in Threads would make it truly concurrent.
+    schemaChanger.run();
+    Thread.sleep(50);
+    splitter.run();
+    waitForSchemaChangeProcess(tableName, 40000);
+
+    // The new family must accept writes once the schema change is done.
+    Put put1 = new Put(row);
+    put1.add(Bytes.toBytes(newFamily), qualifier, value);
+    LOG.info("******** Put into new column family ");
+    ht.put(put1);
+    ht.flushCommits();
+
+    // ... and serve back the value just written.
+    LOG.info("******** Get from new column family ");
+    Get get1 = new Get(row);
+    get1.addColumn(Bytes.toBytes(newFamily), qualifier);
+    Result r = ht.get(get1);
+    byte[] tvalue = r.getValue(Bytes.toBytes(newFamily), qualifier);
+    LOG.info(" Value put = " + value + " value from table = " + tvalue);
+    int result = Bytes.compareTo(value, tvalue);
+    assertEquals(result, 0);
+    LOG.info("End testConcurrentInstantSchemaChangeAndSplit() ");
+  }
+      
+
+}
+
+

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java?rev=1204611&r1=1204610&r2=1204611&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java Mon Nov 21 17:29:32 2011
@@ -41,12 +41,15 @@ import org.apache.hadoop.hbase.client.HC
 import org.apache.hadoop.hbase.client.HConnectionManager;
 import org.apache.hadoop.hbase.client.HConnectionTestingUtility;
 import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.executor.EventHandler;
 import org.apache.hadoop.hbase.executor.ExecutorService;
 import org.apache.hadoop.hbase.io.Reference;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
 import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Writables;
+import org.apache.hadoop.hbase.zookeeper.MasterSchemaChangeTracker;
+import org.apache.hadoop.hbase.zookeeper.RegionServerTracker;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -139,11 +142,6 @@ public class TestCatalogJanitor {
     }
 
     @Override
-    public void checkTableModifiable(byte[] tableName) throws IOException {
-      //no-op
-    }
-
-    @Override
     public void createTable(HTableDescriptor desc, byte[][] splitKeys)
         throws IOException {
       // no-op
@@ -159,6 +157,11 @@ public class TestCatalogJanitor {
       return null;
     }
 
+    public void checkTableModifiable(byte[] tableName,
+                                     EventHandler.EventType eventType)
+        throws IOException {
+    }
+
     @Override
     public MasterFileSystem getMasterFileSystem() {
       return this.mfs;
@@ -243,6 +246,14 @@ public class TestCatalogJanitor {
         }
       };
     }
+
+    public MasterSchemaChangeTracker getSchemaChangeTracker() {
+      return null;
+    }
+
+    public RegionServerTracker getRegionServerTracker() {
+      return null;
+    }
   }
 
   @Test

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/util/MockRegionServerServices.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/util/MockRegionServerServices.java?rev=1204611&r1=1204610&r2=1204611&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/util/MockRegionServerServices.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/util/MockRegionServerServices.java Mon Nov 21 17:29:32 2011
@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.util;
 
 import java.io.IOException;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ConcurrentSkipListMap;
 
@@ -55,6 +56,13 @@ public class MockRegionServerServices im
     return this.regions.get(encodedRegionName);
   }
 
+  public List<HRegion> getOnlineRegions(byte[] tableName) throws IOException {
+    return null;
+  }
+
+  public void refreshRegion(HRegion hRegion) throws IOException {
+  }
+
   @Override
   public void addToOnlineRegions(HRegion r) {
     this.regions.put(r.getRegionInfo().getEncodedName(), r);



Mime
View raw message