hbase-commits mailing list archives

From: st...@apache.org
Subject: svn commit: r1377965 [3/3] - in /hbase/trunk: hbase-common/src/main/java/org/apache/hadoop/hbase/ hbase-server/src/main/java/org/apache/hadoop/hbase/ hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/ hbase-server/src/main/java/org/apache/hado...
Date: Tue, 28 Aug 2012 03:40:49 GMT
Added: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/MetaMockingUtil.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/MetaMockingUtil.java?rev=1377965&view=auto
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/MetaMockingUtil.java (added)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/MetaMockingUtil.java Tue Aug 28 03:40:47 2012
@@ -0,0 +1,125 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.catalog;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * Mocking utility for common META functionality
+ */
+public class MetaMockingUtil {
+
+  /**
+   * Returns a Result object constructed from the given region information simulating
+   * a catalog table result.
+   * @param region the HRegionInfo object or null
+   * @return A mocked up Result that fakes a Get on a row in the <code>.META.</code> table.
+   * @throws IOException
+   */
+  public static Result getMetaTableRowResult(final HRegionInfo region)
+      throws IOException {
+    return getMetaTableRowResult(region, null, null, null);
+  }
+
+  /**
+   * Returns a Result object constructed from the given region information simulating
+   * a catalog table result.
+   * @param region the HRegionInfo object or null
+   * @param sn ServerName to use making startcode and server hostname:port in meta, or null
+   * @return A mocked up Result that fakes a Get on a row in the <code>.META.</code> table.
+   * @throws IOException
+   */
+  public static Result getMetaTableRowResult(final HRegionInfo region, final ServerName sn)
+      throws IOException {
+    return getMetaTableRowResult(region, sn, null, null);
+  }
+
+  /**
+   * Returns a Result object constructed from the given region information simulating
+   * a catalog table result.
+   * @param region the HRegionInfo object or null
+   * @param sn ServerName to use making startcode and server hostname:port in meta, or null
+   * @param splita daughter region or null
+   * @param splitb daughter region or null
+   * @return A mocked up Result that fakes a Get on a row in the <code>.META.</code> table.
+   * @throws IOException
+   */
+  public static Result getMetaTableRowResult(HRegionInfo region, final ServerName sn,
+      HRegionInfo splita, HRegionInfo splitb) throws IOException {
+    List<KeyValue> kvs = new ArrayList<KeyValue>();
+    if (region != null) {
+      kvs.add(new KeyValue(
+        region.getRegionName(),
+        HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
+        region.toByteArray()));
+    }
+
+    if (sn != null) {
+      kvs.add(new KeyValue(region.getRegionName(),
+        HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER,
+        Bytes.toBytes(sn.getHostAndPort())));
+      kvs.add(new KeyValue(region.getRegionName(),
+        HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER,
+        Bytes.toBytes(sn.getStartcode())));
+    }
+
+    if (splita != null) {
+      kvs.add(new KeyValue(
+          region.getRegionName(),
+          HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER,
+          splita.toByteArray()));
+    }
+
+    if (splitb != null) {
+      kvs.add(new KeyValue(
+          region.getRegionName(),
+          HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER,
+          splitb.toByteArray()));
+    }
+
+    // important: sort the kvs so that binary search works
+    Collections.sort(kvs, KeyValue.META_COMPARATOR);
+
+    return new Result(kvs);
+  }
+
+  /**
+   * @param hri Region to mark offline/split and serialize into the mocked Result
+   * @param sn ServerName to use making startcode and server in meta
+   * @return A mocked up Result that fakes a Get on a row in the <code>.META.</code> table.
+   * @throws IOException
+   */
+  public static Result getMetaTableRowResultAsSplitRegion(final HRegionInfo hri, final ServerName sn)
+    throws IOException {
+    hri.setOffline(true);
+    hri.setSplit(true);
+    return getMetaTableRowResult(hri, sn);
+  }
+
+}
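
For context, a minimal sketch of how a test might use the new MetaMockingUtil helper. The table
name, keys, host, port and startcode below are made up for illustration, and the
HRegionInfo/ServerName constructors shown are the ones the tests in this commit use; exact
signatures may differ on other branches.

    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.catalog.MetaMockingUtil;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaMockingUtilExample {
      public static void main(String[] args) throws Exception {
        // A made-up region and server; any table name, keys and host/port/startcode will do.
        HRegionInfo hri = new HRegionInfo(Bytes.toBytes("TestTable"),
            HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
        ServerName sn = new ServerName("example.org", 1234, 5678L);

        // Fakes the Result a Get on the region's .META. row would return, carrying
        // info:regioninfo, info:server and info:serverstartcode cells, already sorted.
        Result r = MetaMockingUtil.getMetaTableRowResult(hri, sn);

        System.out.println(HRegionInfo.getHRegionInfo(r) + " @ " +
            Bytes.toString(r.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER)));
      }
    }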

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestCatalogTracker.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestCatalogTracker.java?rev=1377965&r1=1377964&r2=1377965&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestCatalogTracker.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestCatalogTracker.java Tue Aug 28 03:40:47 2012
@@ -23,8 +23,6 @@ import static org.junit.Assert.assertTru
 
 import java.io.IOException;
 import java.net.ConnectException;
-import java.util.ArrayList;
-import java.util.List;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 
@@ -38,7 +36,6 @@ import org.apache.hadoop.hbase.HBaseTest
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.MediumTests;
 import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
 import org.apache.hadoop.hbase.ServerName;
@@ -50,19 +47,18 @@ import org.apache.hadoop.hbase.client.HC
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.RetriesExhaustedException;
 import org.apache.hadoop.hbase.client.ServerCallable;
+import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponse;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Threads;
-import org.apache.hadoop.hbase.util.Writables;
 import org.apache.hadoop.hbase.zookeeper.RootRegionTracker;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.hadoop.util.Progressable;
 import org.apache.zookeeper.KeeperException;
-import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
@@ -103,7 +99,7 @@ public class TestCatalogTracker {
       public void abort(String why, Throwable e) {
         LOG.info(why, e);
       }
-      
+
       @Override
       public boolean isAborted()  {
         return false;
@@ -127,9 +123,9 @@ public class TestCatalogTracker {
 
   /**
    * Test that we get notification if .META. moves.
-   * @throws IOException 
-   * @throws InterruptedException 
-   * @throws KeeperException 
+   * @throws IOException
+   * @throws InterruptedException
+   * @throws KeeperException
    */
   @Test public void testThatIfMETAMovesWeAreNotified()
   throws IOException, InterruptedException, KeeperException {
@@ -410,7 +406,7 @@ public class TestCatalogTracker {
 
   /**
    * Test waiting on meta w/ no timeout specified.
-   * @throws Exception 
+   * @throws Exception
    */
   @Ignore // Can't make it work reliably on all platforms; mockito gets confused
   // Throwing: org.mockito.exceptions.misusing.WrongTypeOfReturnValue:
@@ -517,20 +513,10 @@ public class TestCatalogTracker {
   /**
    * @return A mocked up Result that fakes a Get on a row in the
    * <code>.META.</code> table.
-   * @throws IOException 
+   * @throws IOException
    */
   private Result getMetaTableRowResult() throws IOException {
-    List<KeyValue> kvs = new ArrayList<KeyValue>();
-    kvs.add(new KeyValue(HConstants.EMPTY_BYTE_ARRAY,
-      HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
-      Writables.getBytes(HRegionInfo.FIRST_META_REGIONINFO)));
-    kvs.add(new KeyValue(HConstants.EMPTY_BYTE_ARRAY,
-      HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER,
-      Bytes.toBytes(SN.getHostAndPort())));
-    kvs.add(new KeyValue(HConstants.EMPTY_BYTE_ARRAY,
-      HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER,
-      Bytes.toBytes(SN.getStartcode())));
-    return new Result(kvs);
+    return MetaMockingUtil.getMetaTableRowResult(HRegionInfo.FIRST_META_REGIONINFO, SN);
   }
 
   private void startWaitAliveThenWaitItLives(final Thread t, final int ms) {

Added: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaMigrationConvertingToPB.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaMigrationConvertingToPB.java?rev=1377965&view=auto
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaMigrationConvertingToPB.java (added)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaMigrationConvertingToPB.java Tue Aug 28 03:40:47 2012
@@ -0,0 +1,410 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.catalog;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import junit.framework.Assert;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.FsShell;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MediumTests;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.io.DataOutputBuffer;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Test the migration that changes HRI serialization to PB. Tests run by bringing up a cluster
+ * on actual data from a 0.92 cluster, as well as by manually downgrading and then upgrading the
+ * META info.
+ * @deprecated Remove after 0.96
+ */
+@Category(MediumTests.class)
+@Deprecated
+public class TestMetaMigrationConvertingToPB {
+  static final Log LOG = LogFactory.getLog(TestMetaMigrationConvertingToPB.class);
+  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+
+  private final static String TESTTABLE = "TestTable";
+
+  private final static int ROW_COUNT = 100;
+  private final static int REGION_COUNT = 9; //initial number of regions of the TestTable
+
+  private static final int META_VERSION_092 = 0;
+
+  /*
+   * This test uses a tgz file named "TestMetaMigrationConvertToPB.tgz" (the name the untar()
+   * helper below looks for) under hbase-server/src/test/data which contains file data from a
+   * 0.92 cluster. The cluster has a table named "TestTable", which has 100 rows. 0.94 has the
+   * same META structure, so the same data covers it as well.
+   *
+   * hbase(main):001:0> create 'TestTable', 'f1'
+   * hbase(main):002:0> for i in 1..100
+   * hbase(main):003:1> put 'TestTable', "row#{i}", "f1:c1", i
+   * hbase(main):004:1> end
+   *
+   * There are 9 regions in the table
+   */
+
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    // Start up our mini cluster on top of an 0.92 root.dir that has data from
+    // a 0.92 hbase run -- it has a table with 100 rows in it  -- and see if
+    // we can migrate from 0.92
+    TEST_UTIL.startMiniZKCluster();
+    TEST_UTIL.startMiniDFSCluster(1);
+    Path testdir = TEST_UTIL.getDataTestDir("TestMetaMigrationConvertToPB");
+    // Untar our test dir.
+    File untar = untar(new File(testdir.toString()));
+    // Now copy the untar up into hdfs so when we start hbase, we'll run from it.
+    Configuration conf = TEST_UTIL.getConfiguration();
+    FsShell shell = new FsShell(conf);
+    FileSystem fs = FileSystem.get(conf);
+    // find where hbase will root itself, so we can copy filesystem there
+    Path hbaseRootDir = TEST_UTIL.getDefaultRootDirPath();
+    if (!fs.isDirectory(hbaseRootDir.getParent())) {
+      // mkdir at first
+      doFsCommand(shell,
+        new String [] {"-mkdir", hbaseRootDir.getParent().toString()});
+    }
+    doFsCommand(shell,
+      new String [] {"-put", untar.toURI().toString(), hbaseRootDir.toString()});
+    // See what's in minihdfs.
+    doFsCommand(shell, new String [] {"-lsr", "/"});
+    TEST_UTIL.startMiniHBaseCluster(1, 1);
+    // Assert we are running against the copied-up filesystem.  The copied-up
+    // rootdir should have had a table named 'TestTable' in it.  Assert it is
+    // present.
+    HTable t = new HTable(TEST_UTIL.getConfiguration(), TESTTABLE);
+    ResultScanner scanner = t.getScanner(new Scan());
+    int count = 0;
+    while (scanner.next() != null) {
+      count++;
+    }
+    // Assert that we find all 100 rows that are in the data we loaded.  If
+    // so then the 0.92 data was brought up and read back successfully.
+    Assert.assertEquals(ROW_COUNT, count);
+    scanner.close();
+    t.close();
+  }
+
+  private static File untar(final File testdir) throws IOException {
+    // Find the src data under src/test/data
+    final String datafile = "TestMetaMigrationConvertToPB";
+    String srcTarFile =
+      System.getProperty("project.build.testSourceDirectory", "src/test") +
+      File.separator + "data" + File.separator + datafile + ".tgz";
+    File homedir = new File(testdir.toString());
+    File tgtUntarDir = new File(homedir, datafile);
+    if (tgtUntarDir.exists()) {
+      if (!FileUtil.fullyDelete(tgtUntarDir)) {
+        throw new IOException("Failed delete of " + tgtUntarDir.toString());
+      }
+    }
+    LOG.info("Untarring " + srcTarFile + " into " + homedir.toString());
+    FileUtil.unTar(new File(srcTarFile), homedir);
+    Assert.assertTrue(tgtUntarDir.exists());
+    return tgtUntarDir;
+  }
+
+  private static void doFsCommand(final FsShell shell, final String [] args)
+  throws Exception {
+    // Run the given fs shell command.
+    int errcode = shell.run(args);
+    if (errcode != 0) throw new IOException("Failed fs command; errcode=" + errcode);
+  }
+
+  /**
+   * @throws java.lang.Exception
+   */
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  @Test
+  public void testMetaUpdatedFlagInROOT() throws Exception {
+    HMaster master = TEST_UTIL.getMiniHBaseCluster().getMaster();
+    boolean metaUpdated = MetaMigrationConvertingToPB.
+      isMetaHRIUpdated(master.getCatalogTracker());
+    assertEquals(true, metaUpdated);
+    verifyMetaRowsAreUpdated(master.getCatalogTracker());
+  }
+
+  @Test
+  public void testMetaMigration() throws Exception {
+    LOG.info("Starting testMetaMigration");
+    final byte [] FAMILY = Bytes.toBytes("family");
+    HTableDescriptor htd = new HTableDescriptor("testMetaMigration");
+    HColumnDescriptor hcd = new HColumnDescriptor(FAMILY);
+    htd.addFamily(hcd);
+    Configuration conf = TEST_UTIL.getConfiguration();
+    byte[][] regionNames = new byte[][]{
+        HConstants.EMPTY_START_ROW,
+        Bytes.toBytes("region_a"),
+        Bytes.toBytes("region_b")};
+    createMultiRegionsWithWritableSerialization(conf, htd.getName(), regionNames);
+    CatalogTracker ct =
+      TEST_UTIL.getMiniHBaseCluster().getMaster().getCatalogTracker();
+    // Erase the current version of root meta for this test.
+    undoVersionInRoot(ct);
+    MetaReader.fullScanMetaAndPrint(ct);
+    LOG.info("Meta Print completed.testMetaMigration");
+
+    long numMigratedRows = MetaMigrationConvertingToPB.updateMeta(
+        TEST_UTIL.getHBaseCluster().getMaster());
+    MetaReader.fullScanMetaAndPrint(ct);
+
+    // Every region of the table we just added should have been migrated.
+    assertEquals(regionNames.length, numMigratedRows);
+
+    // Assert that the flag in ROOT is updated to reflect the correct status
+    boolean metaUpdated =
+        MetaMigrationConvertingToPB.isMetaHRIUpdated(
+        TEST_UTIL.getMiniHBaseCluster().getMaster().getCatalogTracker());
+    assertEquals(true, metaUpdated);
+    verifyMetaRowsAreUpdated(ct);
+  }
+
+  /**
+   * This test simulates a master crash/failure during the meta migration process
+   * and verifies that the migration can continue when a new master takes over.
+   * When a master dies during the meta migration, some rows of the META catalog family
+   * will have been updated with PB serialization while others still carry Writable
+   * serialization. When the backup master (or a fresh master start) attempts the
+   * migration, it will encounter some rows of META already updated with the new HRI
+   * and some still in the legacy format. This test simulates that scenario and
+   * validates that the migration process safely skips the updated rows and migrates
+   * any pending rows at startup.
+   * @throws Exception
+   */
+  @Test
+  public void testMasterCrashDuringMetaMigration() throws Exception {
+    final byte[] FAMILY = Bytes.toBytes("family");
+    HTableDescriptor htd = new HTableDescriptor("testMasterCrashDuringMetaMigration");
+    HColumnDescriptor hcd = new HColumnDescriptor(FAMILY);
+    htd.addFamily(hcd);
+    Configuration conf = TEST_UTIL.getConfiguration();
+    // Create 10 New regions.
+    createMultiRegionsWithPBSerialization(conf, htd.getName(), 10);
+    // Create 10 Legacy regions.
+    createMultiRegionsWithWritableSerialization(conf, htd.getName(), 10);
+    CatalogTracker ct =
+      TEST_UTIL.getMiniHBaseCluster().getMaster().getCatalogTracker();
+    // Erase the current version of root meta for this test.
+    undoVersionInRoot(ct);
+
+    MetaReader.fullScanMetaAndPrint(ct);
+    LOG.info("Meta Print completed.testUpdatesOnMetaWithLegacyHRI");
+
+    long numMigratedRows =
+        MetaMigrationConvertingToPB.updateRootAndMetaIfNecessary(
+            TEST_UTIL.getHBaseCluster().getMaster());
+    assertEquals(10, numMigratedRows);
+
+    // Assert that the flag in ROOT is updated to reflect the correct status
+    boolean metaUpdated = MetaMigrationConvertingToPB.
+      isMetaHRIUpdated(TEST_UTIL.getMiniHBaseCluster().getMaster().getCatalogTracker());
+    assertEquals(true, metaUpdated);
+
+    verifyMetaRowsAreUpdated(ct);
+
+    LOG.info("END testMasterCrashDuringMetaMigration");
+  }
+
+  /**
+   * Verify that every META row is updated
+   */
+  void verifyMetaRowsAreUpdated(CatalogTracker catalogTracker)
+      throws IOException {
+    List<Result> results = MetaReader.fullScan(catalogTracker);
+    assertTrue(results.size() >= REGION_COUNT);
+
+    for (Result result : results) {
+      byte[] hriBytes = result.getValue(HConstants.CATALOG_FAMILY,
+          HConstants.REGIONINFO_QUALIFIER);
+      assertTrue(hriBytes != null && hriBytes.length > 0);
+      assertTrue(MetaMigrationConvertingToPB.isMigrated(hriBytes));
+
+      byte[] splitA = result.getValue(HConstants.CATALOG_FAMILY,
+          HConstants.SPLITA_QUALIFIER);
+      if (splitA != null && splitA.length > 0) {
+        assertTrue(MetaMigrationConvertingToPB.isMigrated(splitA));
+      }
+
+      byte[] splitB = result.getValue(HConstants.CATALOG_FAMILY,
+          HConstants.SPLITB_QUALIFIER);
+      if (splitB != null && splitB.length > 0) {
+        assertTrue(MetaMigrationConvertingToPB.isMigrated(splitB));
+      }
+    }
+  }
+
+  /** Changes the version of META to 0 to simulate 0.92 and 0.94 clusters. */
+  private void undoVersionInRoot(CatalogTracker ct) throws IOException {
+    Put p = new Put(HRegionInfo.FIRST_META_REGIONINFO.getRegionName());
+
+    p.add(HConstants.CATALOG_FAMILY, HConstants.META_VERSION_QUALIFIER,
+        Bytes.toBytes(META_VERSION_092));
+
+    MetaEditor.putToRootTable(ct, p);
+    LOG.info("Downgraded -ROOT- meta version=" + META_VERSION_092);
+  }
+
+  /**
+   * Inserts multiple regions into META using Writable serialization instead of PB
+   */
+  public int createMultiRegionsWithWritableSerialization(final Configuration c,
+      final byte[] tableName, int numRegions) throws IOException {
+    if (numRegions < 3) throw new IOException("Must create at least 3 regions");
+    byte [] startKey = Bytes.toBytes("aaaaa");
+    byte [] endKey = Bytes.toBytes("zzzzz");
+    byte [][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
+    byte [][] regionStartKeys = new byte[splitKeys.length+1][];
+    for (int i=0;i<splitKeys.length;i++) {
+      regionStartKeys[i+1] = splitKeys[i];
+    }
+    regionStartKeys[0] = HConstants.EMPTY_BYTE_ARRAY;
+    return createMultiRegionsWithWritableSerialization(c, tableName, regionStartKeys);
+  }
+
+  /**
+   * Inserts multiple regions into META using Writable serialization instead of PB
+   */
+  public int createMultiRegionsWithWritableSerialization(final Configuration c,
+      final byte[] tableName, byte [][] startKeys)
+  throws IOException {
+    Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
+    HTable meta = new HTable(c, HConstants.META_TABLE_NAME);
+
+    List<HRegionInfo> newRegions
+        = new ArrayList<HRegionInfo>(startKeys.length);
+    int count = 0;
+    for (int i = 0; i < startKeys.length; i++) {
+      int j = (i + 1) % startKeys.length;
+      HRegionInfo hri = new HRegionInfo(tableName, startKeys[i], startKeys[j]);
+      Put put = new Put(hri.getRegionName());
+      put.setWriteToWAL(false);
+      put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
+        getBytes(hri)); //this is the old Writable serialization
+
+      // also add the region itself as its own daughters
+      put.add(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER,
+          getBytes(hri)); //this is the old Writable serialization
+
+      put.add(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER,
+          getBytes(hri)); //this is the old Writable serialization
+
+      meta.put(put);
+      LOG.info("createMultiRegionsWithWritableSerialization: PUT inserted " + hri.toString());
+
+      newRegions.add(hri);
+      count++;
+    }
+    meta.close();
+    return count;
+  }
+
+  @Deprecated
+  private byte[] getBytes(HRegionInfo hri) throws IOException {
+    DataOutputBuffer out = new DataOutputBuffer();
+    try {
+      hri.write(out);
+      return out.getData();
+    } finally {
+      if (out != null) {
+        out.close();
+      }
+    }
+  }
+
+  /**
+   * Inserts multiple regions into META using PB serialization
+   */
+  int createMultiRegionsWithPBSerialization(final Configuration c,
+      final byte[] tableName, int numRegions)
+  throws IOException {
+    if (numRegions < 3) throw new IOException("Must create at least 3 regions");
+    byte [] startKey = Bytes.toBytes("aaaaa");
+    byte [] endKey = Bytes.toBytes("zzzzz");
+    byte [][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
+    byte [][] regionStartKeys = new byte[splitKeys.length+1][];
+    for (int i=0;i<splitKeys.length;i++) {
+      regionStartKeys[i+1] = splitKeys[i];
+    }
+    regionStartKeys[0] = HConstants.EMPTY_BYTE_ARRAY;
+    return createMultiRegionsWithPBSerialization(c, tableName, regionStartKeys);
+  }
+
+  /**
+   * Inserts multiple regions into META using PB serialization
+   */
+  int createMultiRegionsWithPBSerialization(final Configuration c, final byte[] tableName,
+      byte [][] startKeys) throws IOException {
+    Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
+    HTable meta = new HTable(c, HConstants.META_TABLE_NAME);
+
+    List<HRegionInfo> newRegions
+        = new ArrayList<HRegionInfo>(startKeys.length);
+    int count = 0;
+    for (int i = 0; i < startKeys.length; i++) {
+      int j = (i + 1) % startKeys.length;
+      HRegionInfo hri = new HRegionInfo(tableName, startKeys[i], startKeys[j]);
+      Put put = MetaEditor.makePutFromRegionInfo(hri);
+      put.setWriteToWAL(false);
+      meta.put(put);
+      LOG.info("createMultiRegionsWithPBSerialization: PUT inserted " + hri.toString());
+
+      newRegions.add(hri);
+      count++;
+    }
+    meta.close();
+    return count;
+  }
+
+  @org.junit.Rule
+  public org.apache.hadoop.hbase.ResourceCheckerJUnitRule cu =
+    new org.apache.hadoop.hbase.ResourceCheckerJUnitRule();
+
+}
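
As a side note on what the migration flips: the test's getBytes() helper above produces the
legacy Writable form of an HRegionInfo (what a 0.92/0.94 cluster stored in info:regioninfo),
while trunk stores the protobuf form from HRegionInfo.toByteArray(). A rough, illustrative
sketch of the two side by side; it assumes the deprecated Writable write() method is still
present on trunk's HRegionInfo, as the test above relies on.

    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.hadoop.io.DataOutputBuffer;

    public class HRegionInfoSerializationForms {
      public static void main(String[] args) throws Exception {
        HRegionInfo hri = new HRegionInfo(Bytes.toBytes("TestTable"),
            HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);

        // Legacy Writable form, as written by a 0.92/0.94 cluster.
        DataOutputBuffer out = new DataOutputBuffer();
        hri.write(out);
        byte[] writableBytes = java.util.Arrays.copyOf(out.getData(), out.getLength());

        // Protobuf form, as written by trunk after the migration.
        byte[] pbBytes = hri.toByteArray();

        // The tests above use MetaMigrationConvertingToPB.isMigrated(bytes) to tell
        // the two forms apart when scanning .META.
        System.out.println("writable=" + writableBytes.length +
            " bytes, pb=" + pbBytes.length + " bytes");
      }
    }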

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditorNoCluster.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditorNoCluster.java?rev=1377965&r1=1377964&r2=1377965&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditorNoCluster.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditorNoCluster.java Tue Aug 28 03:40:47 2012
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hbase.catalog;
 
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
@@ -26,7 +28,14 @@ import java.util.NavigableMap;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.Abortable;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.MediumTests;
+import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.ClientProtocol;
 import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.client.HConnectionManager;
@@ -36,7 +45,6 @@ import org.apache.hadoop.hbase.protobuf.
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Writables;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.junit.After;
 import org.junit.Before;
@@ -79,12 +87,41 @@ public class TestMetaReaderEditorNoClust
     UTIL.shutdownMiniZKCluster();
   }
 
+  @Test
+  public void testGetHRegionInfo() throws IOException {
+    assertNull(HRegionInfo.getHRegionInfo(new Result()));
+
+    List<KeyValue> kvs = new ArrayList<KeyValue>();
+    Result r = new Result(kvs);
+    assertNull(HRegionInfo.getHRegionInfo(r));
+
+    byte [] f = HConstants.CATALOG_FAMILY;
+    // Make a key value that doesn't have the expected qualifier.
+    kvs.add(new KeyValue(HConstants.EMPTY_BYTE_ARRAY, f,
+      HConstants.SERVER_QUALIFIER, f));
+    r = new Result(kvs);
+    assertNull(HRegionInfo.getHRegionInfo(r));
+    // Make a key that does not have a regioninfo value.
+    kvs.add(new KeyValue(HConstants.EMPTY_BYTE_ARRAY, f,
+      HConstants.REGIONINFO_QUALIFIER, f));
+    HRegionInfo hri = HRegionInfo.getHRegionInfo(new Result(kvs));
+    assertNull(hri);
+    // OK, give it what it expects
+    kvs.clear();
+    kvs.add(new KeyValue(HConstants.EMPTY_BYTE_ARRAY, f,
+      HConstants.REGIONINFO_QUALIFIER,
+      HRegionInfo.FIRST_META_REGIONINFO.toByteArray()));
+    hri = HRegionInfo.getHRegionInfo(new Result(kvs));
+    assertNotNull(hri);
+    assertTrue(hri.equals(HRegionInfo.FIRST_META_REGIONINFO));
+  }
+
   /**
    * Test that MetaReader will ride over server throwing
    * "Server not running" IOEs.
    * @see https://issues.apache.org/jira/browse/HBASE-3446
-   * @throws IOException 
-   * @throws InterruptedException 
+   * @throws IOException
+   * @throws InterruptedException
    */
   @Test
   public void testRideOverServerNotRunning()
@@ -112,7 +149,7 @@ public class TestMetaReaderEditorNoClust
       final byte [] rowToVerify = Bytes.toBytes("rowToVerify");
       kvs.add(new KeyValue(rowToVerify,
         HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
-        Writables.getBytes(HRegionInfo.FIRST_META_REGIONINFO)));
+        HRegionInfo.FIRST_META_REGIONINFO.toByteArray()));
       kvs.add(new KeyValue(rowToVerify,
         HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER,
         Bytes.toBytes(sn.getHostAndPort())));

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/Mocking.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/Mocking.java?rev=1377965&r1=1377964&r2=1377965&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/Mocking.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/Mocking.java Tue Aug 28 03:40:47 2012
@@ -19,20 +19,11 @@ package org.apache.hadoop.hbase.master;
 
 import static org.junit.Assert.assertNotSame;
 
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
 import org.apache.hadoop.hbase.DeserializationException;
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.RegionTransition;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.executor.EventHandler.EventType;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Writables;
 import org.apache.hadoop.hbase.zookeeper.ZKAssign;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
@@ -42,45 +33,6 @@ import org.apache.zookeeper.KeeperExcept
  * Package scoped mocking utility.
  */
 public class Mocking {
-  /**
-   * @param sn ServerName to use making startcode and server in meta
-   * @param hri Region to serialize into HRegionInfo
-   * @return A mocked up Result that fakes a Get on a row in the
-   * <code>.META.</code> table.
-   * @throws IOException 
-   */
-  static Result getMetaTableRowResult(final HRegionInfo hri,
-      final ServerName sn)
-  throws IOException {
-    // TODO: Move to a utilities class.  More than one test case can make use
-    // of this facility.
-    List<KeyValue> kvs = new ArrayList<KeyValue>();
-    kvs.add(new KeyValue(HConstants.EMPTY_BYTE_ARRAY,
-      HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
-      Writables.getBytes(hri)));
-    kvs.add(new KeyValue(HConstants.EMPTY_BYTE_ARRAY,
-      HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER,
-      Bytes.toBytes(sn.getHostAndPort())));
-    kvs.add(new KeyValue(HConstants.EMPTY_BYTE_ARRAY,
-      HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER,
-      Bytes.toBytes(sn.getStartcode())));
-    return new Result(kvs);
-  }
-
-
-  /**
-   * @param sn  ServerName to use making startcode and server in meta
-   * @param hri Region to serialize into HRegionInfo
-   * @return A mocked up Result that fakes a Get on a row in the <code>.META.</code> table.
-   * @throws IOException
-   */
-  static Result getMetaTableRowResultAsSplitRegion(final HRegionInfo hri, final ServerName sn)
-    throws IOException {
-    hri.setOffline(true);
-    hri.setSplit(true);
-    return getMetaTableRowResult(hri, sn);
-  }
-
 
   static void waitForRegionPendingOpenInRIT(AssignmentManager am, String encodedName)
     throws InterruptedException {
@@ -110,7 +62,7 @@ public class Mocking {
    * @param sn Name of the regionserver doing the 'opening'
    * @param hri Region we're 'opening'.
    * @throws KeeperException
-   * @throws DeserializationException 
+   * @throws DeserializationException
    */
   static void fakeRegionServerRegionOpenInZK(HMaster master,  final ZooKeeperWatcher w,
       final ServerName sn, final HRegionInfo hri)
@@ -147,7 +99,7 @@ public class Mocking {
    * @param region
    * @param expectedState
    * @return true if region exists and is in expected state
-   * @throws DeserializationException 
+   * @throws DeserializationException
    */
   static boolean verifyRegionState(ZooKeeperWatcher zkw, HRegionInfo region, EventType expectedState)
   throws KeeperException, DeserializationException {

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManager.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManager.java?rev=1377965&r1=1377964&r2=1377965&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManager.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManager.java Tue Aug 28 03:40:47 2012
@@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.ServerLoa
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
 import org.apache.hadoop.hbase.catalog.CatalogTracker;
+import org.apache.hadoop.hbase.catalog.MetaMockingUtil;
 import org.apache.hadoop.hbase.client.ClientProtocol;
 import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.client.HConnectionTestingUtility;
@@ -554,9 +555,9 @@ public class TestAssignmentManager {
     
     Result r = null;
     if (splitRegion) {
-      r = Mocking.getMetaTableRowResultAsSplitRegion(REGIONINFO, SERVERNAME_A);
+      r = MetaMockingUtil.getMetaTableRowResultAsSplitRegion(REGIONINFO, SERVERNAME_A);
     } else {
-      r = Mocking.getMetaTableRowResult(REGIONINFO, SERVERNAME_A);
+      r = MetaMockingUtil.getMetaTableRowResult(REGIONINFO, SERVERNAME_A);
     }
     
     ScanResponse.Builder builder = ScanResponse.newBuilder();
@@ -918,7 +919,7 @@ public class TestAssignmentManager {
     // with an encoded name by doing a Get on .META.
     ClientProtocol ri = Mockito.mock(ClientProtocol.class);
     // Get a meta row result that has region up on SERVERNAME_A for REGIONINFO
-    Result r = Mocking.getMetaTableRowResult(REGIONINFO, SERVERNAME_A);
+    Result r = MetaMockingUtil.getMetaTableRowResult(REGIONINFO, SERVERNAME_A);
     ScanResponse.Builder builder = ScanResponse.newBuilder();
     builder.setMoreResults(true);
     builder.addResult(ProtobufUtil.toResult(r));

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java?rev=1377965&r1=1377964&r2=1377965&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java Tue Aug 28 03:40:47 2012
@@ -31,12 +31,11 @@ import org.apache.hadoop.hbase.HRegionIn
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MediumTests;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.catalog.MetaEditor;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Writables;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -77,10 +76,7 @@ public class TestAssignmentManagerOnClus
       HTable meta = new HTable(conf, HConstants.META_TABLE_NAME);
       HRegionInfo hri = new HRegionInfo(
         desc.getName(), Bytes.toBytes("A"), Bytes.toBytes("Z"));
-      Put put = new Put(hri.getRegionName());
-      put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
-          Writables.getBytes(hri));
-      meta.put(put);
+      MetaEditor.addRegionToMeta(meta, hri);
 
       HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
       master.assignRegion(hri);

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java?rev=1377965&r1=1377964&r2=1377965&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java Tue Aug 28 03:40:47 2012
@@ -22,16 +22,12 @@ package org.apache.hadoop.hbase.master;
 import static org.apache.hadoop.hbase.util.HFileArchiveTestingUtil.assertArchiveEqualToOriginal;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.spy;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
 import java.util.Map;
 import java.util.SortedMap;
 import java.util.TreeMap;
@@ -46,13 +42,13 @@ import org.apache.hadoop.hbase.HColumnDe
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.SmallTests;
 import org.apache.hadoop.hbase.TableDescriptors;
 import org.apache.hadoop.hbase.catalog.CatalogTracker;
+import org.apache.hadoop.hbase.catalog.MetaMockingUtil;
 import org.apache.hadoop.hbase.client.AdminProtocol;
 import org.apache.hadoop.hbase.client.ClientProtocol;
 import org.apache.hadoop.hbase.client.HConnection;
@@ -70,9 +66,7 @@ import org.apache.hadoop.hbase.util.Byte
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.HFileArchiveUtil;
 import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.hbase.util.Writables;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.mockito.Mockito;
@@ -146,7 +140,7 @@ public class TestCatalogJanitor {
     public void abort(String why, Throwable e) {
       //no-op
     }
-    
+
     @Override
     public boolean isAborted() {
       return false;
@@ -235,7 +229,7 @@ public class TestCatalogJanitor {
     public void abort(String why, Throwable e) {
       //no-op
     }
-    
+
     @Override
     public boolean isAborted() {
       return false;
@@ -261,29 +255,29 @@ public class TestCatalogJanitor {
           // TODO Auto-generated method stub
           return null;
         }
-        
+
         @Override
         public Map<String, HTableDescriptor> getAll() throws IOException {
           // TODO Auto-generated method stub
           return null;
         }
-        
+
         @Override
         public HTableDescriptor get(byte[] tablename)
         throws FileNotFoundException, IOException {
           return get(Bytes.toString(tablename));
         }
-        
+
         @Override
         public HTableDescriptor get(String tablename)
         throws FileNotFoundException, IOException {
           return createHTableDescriptor();
         }
-        
+
         @Override
         public void add(HTableDescriptor htd) throws IOException {
           // TODO Auto-generated method stub
-          
+
         }
       };
     }
@@ -295,33 +289,6 @@ public class TestCatalogJanitor {
   }
 
   @Test
-  public void testGetHRegionInfo() throws IOException {
-    assertNull(CatalogJanitor.getHRegionInfo(new Result()));
-    List<KeyValue> kvs = new ArrayList<KeyValue>();
-    Result r = new Result(kvs);
-    assertNull(CatalogJanitor.getHRegionInfo(r));
-    byte [] f = HConstants.CATALOG_FAMILY;
-    // Make a key value that doesn't have the expected qualifier.
-    kvs.add(new KeyValue(HConstants.EMPTY_BYTE_ARRAY, f,
-      HConstants.SERVER_QUALIFIER, f));
-    r = new Result(kvs);
-    assertNull(CatalogJanitor.getHRegionInfo(r));
-    // Make a key that does not have a regioninfo value.
-    kvs.add(new KeyValue(HConstants.EMPTY_BYTE_ARRAY, f,
-      HConstants.REGIONINFO_QUALIFIER, f));
-    HRegionInfo hri = CatalogJanitor.getHRegionInfo(new Result(kvs));
-    assertTrue(hri == null);
-    // OK, give it what it expects
-    kvs.clear();
-    kvs.add(new KeyValue(HConstants.EMPTY_BYTE_ARRAY, f,
-      HConstants.REGIONINFO_QUALIFIER,
-      Writables.getBytes(HRegionInfo.FIRST_META_REGIONINFO)));
-    hri = CatalogJanitor.getHRegionInfo(new Result(kvs));
-    assertNotNull(hri);
-    assertTrue(hri.equals(HRegionInfo.FIRST_META_REGIONINFO));
-  }
-
-  @Test
   public void testCleanParent() throws IOException, InterruptedException {
     HBaseTestingUtility htu = new HBaseTestingUtility();
     setRootDirAndCleanIt(htu, "testCleanParent");
@@ -343,12 +310,7 @@ public class TestCatalogJanitor {
             Bytes.toBytes("eee"));
       // Test that when both daughter regions are in place, that we do not
       // remove the parent.
-      List<KeyValue> kvs = new ArrayList<KeyValue>();
-      kvs.add(new KeyValue(parent.getRegionName(), HConstants.CATALOG_FAMILY,
-          HConstants.SPLITA_QUALIFIER, Writables.getBytes(splita)));
-      kvs.add(new KeyValue(parent.getRegionName(), HConstants.CATALOG_FAMILY,
-          HConstants.SPLITB_QUALIFIER, Writables.getBytes(splitb)));
-      Result r = new Result(kvs);
+      Result r = createResult(parent, splita, splitb);
       // Add a reference under splitA directory so we don't clear out the parent.
       Path rootdir = services.getMasterFileSystem().getRootDir();
       Path tabledir =
@@ -541,9 +503,9 @@ public class TestCatalogJanitor {
 
     final Map<HRegionInfo, Result> splitParents =
         new TreeMap<HRegionInfo, Result>(new SplitParentFirstComparator());
-    splitParents.put(parent, makeResultFromHRegionInfo(parent, splita, splitb));
+    splitParents.put(parent, createResult(parent, splita, splitb));
     splita.setOffline(true); //simulate that splita goes offline when it is split
-    splitParents.put(splita, makeResultFromHRegionInfo(splita, splitaa, splitab));
+    splitParents.put(splita, createResult(splita, splitaa, splitab));
 
     CatalogJanitor janitor = spy(new CatalogJanitor(server, services));
     doReturn(new Pair<Integer, Map<HRegionInfo, Result>>(
@@ -586,12 +548,7 @@ public class TestCatalogJanitor {
     HRegionInfo splitb = new HRegionInfo(htd.getName(), Bytes.toBytes("ccc"), Bytes.toBytes("eee"));
     // Test that when both daughter regions are in place, that we do not
     // remove the parent.
-    List<KeyValue> kvs = new ArrayList<KeyValue>();
-    kvs.add(new KeyValue(parent.getRegionName(), HConstants.CATALOG_FAMILY,
-        HConstants.SPLITA_QUALIFIER, Writables.getBytes(splita)));
-    kvs.add(new KeyValue(parent.getRegionName(), HConstants.CATALOG_FAMILY,
-        HConstants.SPLITB_QUALIFIER, Writables.getBytes(splitb)));
-    Result r = new Result(kvs);
+    Result r = createResult(parent, splita, splitb);
 
     FileSystem fs = FileSystem.get(htu.getConfiguration());
     Path rootdir = services.getMasterFileSystem().getRootDir();
@@ -651,12 +608,7 @@ public class TestCatalogJanitor {
     HRegionInfo splitb = new HRegionInfo(htd.getName(), Bytes.toBytes("ccc"), Bytes.toBytes("eee"));
     // Test that when both daughter regions are in place, that we do not
     // remove the parent.
-    List<KeyValue> kvs = new ArrayList<KeyValue>();
-    kvs.add(new KeyValue(parent.getRegionName(), HConstants.CATALOG_FAMILY,
-        HConstants.SPLITA_QUALIFIER, Writables.getBytes(splita)));
-    kvs.add(new KeyValue(parent.getRegionName(), HConstants.CATALOG_FAMILY,
-        HConstants.SPLITB_QUALIFIER, Writables.getBytes(splitb)));
-    Result r = new Result(kvs);
+    Result r = createResult(parent, splita, splitb);
 
     FileSystem fs = FileSystem.get(htu.getConfiguration());
 
@@ -722,31 +674,6 @@ public class TestCatalogJanitor {
     assertEquals(count, storeFiles.length);
   }
 
-  private Result makeResultFromHRegionInfo(HRegionInfo region, HRegionInfo splita,
-      HRegionInfo splitb) throws IOException {
-    List<KeyValue> kvs = new ArrayList<KeyValue>();
-    kvs.add(new KeyValue(
-        region.getRegionName(),
-        HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
-        Writables.getBytes(region)));
-
-    if (splita != null) {
-      kvs.add(new KeyValue(
-          region.getRegionName(),
-          HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER,
-          Writables.getBytes(splita)));
-    }
-
-    if (splitb != null) {
-      kvs.add(new KeyValue(
-          region.getRegionName(),
-          HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER,
-          Writables.getBytes(splitb)));
-    }
-
-    return new Result(kvs);
-  }
-
   private String setRootDirAndCleanIt(final HBaseTestingUtility htu,
       final String subdir)
   throws IOException {
@@ -788,12 +715,7 @@ public class TestCatalogJanitor {
   private Result createResult(final HRegionInfo parent, final HRegionInfo a,
       final HRegionInfo b)
   throws IOException {
-    List<KeyValue> kvs = new ArrayList<KeyValue>();
-    kvs.add(new KeyValue(parent.getRegionName(), HConstants.CATALOG_FAMILY,
-      HConstants.SPLITA_QUALIFIER, Writables.getBytes(a)));
-    kvs.add(new KeyValue(parent.getRegionName(), HConstants.CATALOG_FAMILY,
-      HConstants.SPLITB_QUALIFIER, Writables.getBytes(b)));
-    return new Result(kvs);
+    return MetaMockingUtil.getMetaTableRowResult(parent, null, a, b);
   }
 
   private HTableDescriptor createHTableDescriptor() {

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java?rev=1377965&r1=1377964&r2=1377965&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java Tue Aug 28 03:40:47 2012
@@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
 import org.apache.hadoop.hbase.catalog.CatalogTracker;
+import org.apache.hadoop.hbase.catalog.MetaMockingUtil;
 import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.client.HConnectionTestingUtility;
 import org.apache.hadoop.hbase.client.Result;
@@ -153,17 +154,17 @@ public class TestMasterNoCluster {
     RootRegionTracker.setRootLocation(rs0.getZooKeeper(), rs0.getServerName());
     byte [] rootregion = Bytes.toBytes("-ROOT-,,0");
     rs0.setGetResult(rootregion, HRegionInfo.FIRST_META_REGIONINFO.getRegionName(),
-      Mocking.getMetaTableRowResult(HRegionInfo.FIRST_META_REGIONINFO,
+      MetaMockingUtil.getMetaTableRowResult(HRegionInfo.FIRST_META_REGIONINFO,
         rs1.getServerName()));
     final byte [] tableName = Bytes.toBytes("t");
     Result [] results = new Result [] {
-      Mocking.getMetaTableRowResult(
+      MetaMockingUtil.getMetaTableRowResult(
         new HRegionInfo(tableName, HConstants.EMPTY_START_ROW, HBaseTestingUtility.KEYS[1]),
         rs2.getServerName()),
-      Mocking.getMetaTableRowResult(
+      MetaMockingUtil.getMetaTableRowResult(
         new HRegionInfo(tableName, HBaseTestingUtility.KEYS[1], HBaseTestingUtility.KEYS[2]),
         rs2.getServerName()),
-      Mocking.getMetaTableRowResult(new HRegionInfo(tableName, HBaseTestingUtility.KEYS[2],
+      MetaMockingUtil.getMetaTableRowResult(new HRegionInfo(tableName, HBaseTestingUtility.KEYS[2],
           HConstants.EMPTY_END_ROW),
         rs2.getServerName())
     };
@@ -349,7 +350,7 @@ public class TestMasterNoCluster {
       // confirm .META. has a server.
       byte [] rootregion = Bytes.toBytes("-ROOT-,,0");
       rs0.setGetResult(rootregion, HRegionInfo.FIRST_META_REGIONINFO.getRegionName(),
-        Mocking.getMetaTableRowResult(HRegionInfo.FIRST_META_REGIONINFO,
+        MetaMockingUtil.getMetaTableRowResult(HRegionInfo.FIRST_META_REGIONINFO,
           rs0.getServerName()));
       // Master should now come up.
       while (!master.isInitialized()) {Threads.sleep(10);}

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterTransitions.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterTransitions.java?rev=1377965&r1=1377964&r2=1377965&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterTransitions.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterTransitions.java Tue Aug 28 03:40:47 2012
@@ -23,14 +23,16 @@ import java.io.IOException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.LargeTests;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Writables;
 import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.Before;
@@ -97,7 +99,7 @@ public class TestMasterTransitions {
     private int closeCount = 0;
     static final int SERVER_DURATION = 3 * 1000;
     static final int CLOSE_DURATION = 1 * 1000;
- 
+
     HBase2428Listener(final MiniHBaseCluster c, final HServerAddress metaAddress,
         final HRegionInfo closingHRI, final int otherServerIndex) {
       this.cluster = c;
@@ -183,7 +185,7 @@ public class TestMasterTransitions {
   /**
    * In 2428, the meta region has just been set offline and then a close comes
    * in.
-   * @see <a href="https://issues.apache.org/jira/browse/HBASE-2428">HBASE-2428</a> 
+   * @see <a href="https://issues.apache.org/jira/browse/HBASE-2428">HBASE-2428</a>
    */
   @Ignore @Test  (timeout=300000) public void testRegionCloseWhenNoMetaHBase2428()
   throws Exception {
@@ -205,7 +207,7 @@ public class TestMasterTransitions {
     // Get a region out on the otherServer.
     final HRegionInfo hri =
       otherServer.getOnlineRegions().iterator().next().getRegionInfo();
- 
+
     // Add our RegionServerOperationsListener
     HBase2428Listener listener = new HBase2428Listener(cluster,
       metaHRS.getHServerInfo().getServerAddress(), hri, otherServerIndex);
@@ -312,7 +314,7 @@ public class TestMasterTransitions {
       this.copyOfOnlineRegions =
         this.victim.getCopyOfOnlineRegionsSortedBySize().values();
     }
- 
+
     @Override
     public boolean process(HServerInfo serverInfo, HMsg incomingMsg) {
       if (!victim.getServerInfo().equals(serverInfo) ||
@@ -365,7 +367,7 @@ public class TestMasterTransitions {
    * we kill it.  We then wait on all regions to come back on line.  If bug
    * is fixed, this should happen soon as the processing of the killed server is
    * done.
-   * @see <a href="https://issues.apache.org/jira/browse/HBASE-2482">HBASE-2482</a> 
+   * @see <a href="https://issues.apache.org/jira/browse/HBASE-2482">HBASE-2482</a>
    */
   @Ignore @Test (timeout=300000) public void testKillRSWithOpeningRegion2482()
   throws Exception {
@@ -483,10 +485,9 @@ public class TestMasterTransitions {
     scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
     ResultScanner s = meta.getScanner(scan);
     for (Result r = null; (r = s.next()) != null;) {
-      byte [] b =
-        r.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
-      if (b == null || b.length <= 0) break;
-      HRegionInfo hri = Writables.getHRegionInfo(b);
+      HRegionInfo hri = HRegionInfo.getHRegionInfo(r);
+      if (hri == null) break;
+
       // If start key, add 'aaa'.
       byte [] row = getStartKey(hri);
       Put p = new Put(row);

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestZKBasedOpenCloseRegion.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestZKBasedOpenCloseRegion.java?rev=1377965&r1=1377964&r2=1377965&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestZKBasedOpenCloseRegion.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestZKBasedOpenCloseRegion.java Tue Aug 28 03:40:47 2012
@@ -20,6 +20,11 @@
 package org.apache.hadoop.hbase.master;
 
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
 import java.io.IOException;
 import java.util.Collection;
 import java.util.concurrent.atomic.AtomicBoolean;
@@ -27,7 +32,12 @@ import java.util.concurrent.atomic.Atomi
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.MediumTests;
+import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.TableDescriptors;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
@@ -41,7 +51,6 @@ import org.apache.hadoop.hbase.protobuf.
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Threads;
-import org.apache.hadoop.hbase.util.Writables;
 import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.Before;
@@ -51,11 +60,6 @@ import org.junit.experimental.categories
 import org.mockito.Mockito;
 import org.mockito.internal.util.reflection.Whitebox;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.junit.Assert.assertFalse;
-
 /**
  * Test open and close of regions using zk.
  */
@@ -265,7 +269,7 @@ public class TestZKBasedOpenCloseRegion 
     // remove the block and reset the boolean
     hr1.getRegionsInTransitionInRS().remove(hri.getEncodedNameAsBytes());
     reopenEventProcessed.set(false);
-    
+
     // now try moving a region when there is no region in transition.
     hri = getNonMetaRegion(ProtobufUtil.getOnlineRegions(hr1));
 
@@ -275,7 +279,7 @@ public class TestZKBasedOpenCloseRegion 
 
     cluster.getMaster().executorService.
       registerListener(EventType.RS_ZK_REGION_OPENED, openListener);
-    
+
     TEST_UTIL.getHBaseAdmin().move(hri.getEncodedNameAsBytes(),
         Bytes.toBytes(hr0.getServerName().toString()));
 
@@ -336,7 +340,7 @@ public class TestZKBasedOpenCloseRegion 
     assertFalse("Region should not be in RIT",
         regionServer.getRegionsInTransitionInRS().containsKey(REGIONINFO.getEncodedNameAsBytes()));
   }
-  
+
   private static void waitUntilAllRegionsAssigned()
   throws IOException {
     HTable meta = new HTable(TEST_UTIL.getConfiguration(),
@@ -381,12 +385,9 @@ public class TestZKBasedOpenCloseRegion 
     scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
     ResultScanner s = meta.getScanner(scan);
     for (Result r = null; (r = s.next()) != null;) {
-      byte [] b =
-        r.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
-      if (b == null || b.length <= 0) {
-        break;
-      }
-      HRegionInfo hri = Writables.getHRegionInfo(b);
+      HRegionInfo hri = HRegionInfo.getHRegionInfo(r);
+      if (hri == null) break;
+
       // If start key, add 'aaa'.
       byte [] row = getStartKey(hri);
       Put p = new Put(row);

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java?rev=1377965&r1=1377964&r2=1377965&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java Tue Aug 28 03:40:47 2012
@@ -42,7 +42,6 @@ import org.apache.hadoop.hbase.LargeTest
 import org.apache.hadoop.hbase.NotServingRegionException;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.Stoppable;
-import org.apache.hadoop.hbase.catalog.MetaEditor;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HConnection;
@@ -328,10 +327,10 @@ public class TestEndToEndSplitTransactio
           break;
         }
 
-        HRegionInfo region = MetaEditor.getHRegionInfo(result);
+        HRegionInfo region = HRegionInfo.getHRegionInfo(result);
         if(region.isSplitParent()) {
           log("found parent region: " + region.toString());
-          PairOfSameType<HRegionInfo> pair = MetaEditor.getDaughterRegions(result);
+          PairOfSameType<HRegionInfo> pair = HRegionInfo.getDaughterRegions(result);
           daughterA = pair.getFirst();
           daughterB = pair.getSecond();
           break;
@@ -367,7 +366,7 @@ public class TestEndToEndSplitTransactio
       while (System.currentTimeMillis() - start < timeout) {
         Result result = getRegionRow(regionName);
         if (result != null) {
-          HRegionInfo info = MetaEditor.getHRegionInfo(result);
+          HRegionInfo info = HRegionInfo.getHRegionInfo(result);
           if (info != null && !info.isOffline()) {
             log("found region in META: " + Bytes.toStringBinary(regionName));
             break;
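
Both helpers that MetaEditor used to provide here now live on HRegionInfo itself: getHRegionInfo(Result) for the parent row and getDaughterRegions(Result) for the splitA/splitB columns. A rough sketch of reading one parent row, assuming a Result already fetched from .META. (the class and method names below are illustrative):

  // Illustrative sketch only; class and method names are not part of the commit.
  import org.apache.hadoop.hbase.HRegionInfo;
  import org.apache.hadoop.hbase.client.Result;
  import org.apache.hadoop.hbase.util.PairOfSameType;

  class SplitParentSketch {
    /** Prints a split parent and its two daughters, if the row is a parent. */
    static void describe(final Result metaRow) {
      HRegionInfo parent = HRegionInfo.getHRegionInfo(metaRow);
      if (parent == null || !parent.isSplitParent()) {
        return; // not a parent row, nothing more to decode
      }
      PairOfSameType<HRegionInfo> daughters = HRegionInfo.getDaughterRegions(metaRow);
      System.out.println("parent=" + parent.getRegionNameAsString()
          + ", daughterA=" + daughters.getFirst()
          + ", daughterB=" + daughters.getSecond());
    }
  }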

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java?rev=1377965&r1=1377964&r2=1377965&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java Tue Aug 28 03:40:47 2012
@@ -27,14 +27,19 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.HBaseTestCase;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.MediumTests;
+import org.apache.hadoop.hbase.catalog.MetaEditor;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.TestGet;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Writables;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.junit.experimental.categories.Category;
 
 /**
@@ -75,10 +80,9 @@ public class TestGetClosestAtOrBefore ex
         HRegionInfo hri = new HRegionInfo(htd.getName(),
           i == 0? HConstants.EMPTY_BYTE_ARRAY: Bytes.toBytes((byte)i),
           i == last? HConstants.EMPTY_BYTE_ARRAY: Bytes.toBytes((byte)i + interval));
-        Put put = new Put(hri.getRegionName());
+
+        Put put = MetaEditor.makePutFromRegionInfo(hri);
         put.setWriteToWAL(false);
-        put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
-                Writables.getBytes(hri));
         mr.put(put, false);
       }
     }
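
MetaEditor.makePutFromRegionInfo(hri) builds the Put, including the serialized info:regioninfo cell, that the old code assembled by hand with Writables.getBytes. A short sketch of seeding one synthetic region into a meta HRegion, assuming only the calls already visible in this hunk (class name, key ranges and boundaries are placeholders):

  // Illustrative sketch only; class and method names are not part of the commit.
  import java.io.IOException;

  import org.apache.hadoop.hbase.HRegionInfo;
  import org.apache.hadoop.hbase.HTableDescriptor;
  import org.apache.hadoop.hbase.catalog.MetaEditor;
  import org.apache.hadoop.hbase.client.Put;
  import org.apache.hadoop.hbase.regionserver.HRegion;
  import org.apache.hadoop.hbase.util.Bytes;

  class MetaPutSketch {
    /** Writes a single synthetic region row into the given meta region. */
    static void addFakeRegion(final HRegion metaRegion, final HTableDescriptor htd)
        throws IOException {
      HRegionInfo hri = new HRegionInfo(htd.getName(),
          Bytes.toBytes("aaa"), Bytes.toBytes("bbb"));
      // The returned Put already carries info:regioninfo for this region.
      Put put = MetaEditor.makePutFromRegionInfo(hri);
      put.setWriteToWAL(false);
      metaRegion.put(put, false);
    }
  }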

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java?rev=1377965&r1=1377964&r2=1377965&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java Tue Aug 28 03:40:47 2012
@@ -19,8 +19,6 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
-import java.io.ByteArrayOutputStream;
-import java.io.DataOutputStream;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
@@ -46,7 +44,6 @@ import org.apache.hadoop.hbase.filter.In
 import org.apache.hadoop.hbase.filter.PrefixFilter;
 import org.apache.hadoop.hbase.filter.WhileMatchFilter;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Writables;
 import org.junit.experimental.categories.Category;
 
 /**
@@ -241,11 +238,8 @@ public class TestScanner extends HBaseTe
 
       Put put = new Put(ROW_KEY, System.currentTimeMillis(), null);
 
-      ByteArrayOutputStream byteStream = new ByteArrayOutputStream();
-      DataOutputStream s = new DataOutputStream(byteStream);
-      REGION_INFO.write(s);
       put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
-          byteStream.toByteArray());
+          REGION_INFO.toByteArray());
       region.put(put);
 
       // What we just committed is in the memstore. Verify that we can get
@@ -346,8 +340,7 @@ public class TestScanner extends HBaseTe
 
   /** Compare the HRegionInfo we read from HBase to what we stored */
   private void validateRegionInfo(byte [] regionBytes) throws IOException {
-    HRegionInfo info =
-      (HRegionInfo) Writables.getWritable(regionBytes, new HRegionInfo());
+    HRegionInfo info = HRegionInfo.parseFromOrNull(regionBytes);
 
     assertEquals(REGION_INFO.getRegionId(), info.getRegionId());
     assertEquals(0, info.getStartKey().length);
@@ -489,7 +482,7 @@ public class TestScanner extends HBaseTe
   /**
    * Make sure scanner returns correct result when we run a major compaction
    * with deletes.
-   * 
+   *
    * @throws Exception
    */
   @SuppressWarnings("deprecation")
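
The Writable round trip through ByteArrayOutputStream/DataOutputStream is replaced by the byte-array form of HRegionInfo; a tiny round-trip sketch using only the two methods this file now calls (the class and helper names are illustrative):

  // Illustrative sketch only; class and method names are not part of the commit.
  import org.apache.hadoop.hbase.HRegionInfo;

  class RegionInfoBytesSketch {
    /** Serializes a region descriptor and parses it back, checking the id survives. */
    static void roundTrip(final HRegionInfo info) {
      byte[] bytes = info.toByteArray();                      // was Writables.getBytes(info)
      HRegionInfo copy = HRegionInfo.parseFromOrNull(bytes);  // was Writables.getWritable(...)
      if (copy == null || copy.getRegionId() != info.getRegionId()) {
        throw new IllegalStateException("HRegionInfo round trip lost data");
      }
    }
  }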

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java?rev=1377965&r1=1377964&r2=1377965&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java Tue Aug 28 03:40:47 2012
@@ -55,6 +55,7 @@ import org.apache.hadoop.hbase.LargeTest
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.RegionTransition;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.catalog.MetaEditor;
 import org.apache.hadoop.hbase.client.AdminProtocol;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
@@ -128,7 +129,7 @@ public class TestHBaseFsck {
   @Test
   public void testHBaseFsck() throws Exception {
     assertNoErrors(doFsck(conf, false));
-    String table = "tableBadMetaAssign"; 
+    String table = "tableBadMetaAssign";
     TEST_UTIL.createTable(Bytes.toBytes(table), FAM);
 
     // We created 1 table, should be fine
@@ -193,10 +194,8 @@ public class TestHBaseFsck {
       throws IOException {
     HTable meta = new HTable(conf, HConstants.META_TABLE_NAME);
     HRegionInfo hri = new HRegionInfo(htd.getName(), startKey, endKey);
-    Put put = new Put(hri.getRegionName());
-    put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
-        Writables.getBytes(hri));
-    meta.put(put);
+    MetaEditor.addRegionToMeta(meta, hri);
+    meta.close();
     return hri;
   }
 
@@ -300,7 +299,7 @@ public class TestHBaseFsck {
 
   /**
    * Setup a clean table before we start mucking with it.
-   * 
+   *
    * @throws IOException
    * @throws InterruptedException
    * @throws KeeperException
@@ -350,7 +349,7 @@ public class TestHBaseFsck {
 
   /**
    * delete table in preparation for next test
-   * 
+   *
    * @param tablename
    * @throws IOException
    */
@@ -406,13 +405,13 @@ public class TestHBaseFsck {
 
       // limit number of threads to 1.
       Configuration newconf = new Configuration(conf);
-      newconf.setInt("hbasefsck.numthreads", 1);  
+      newconf.setInt("hbasefsck.numthreads", 1);
       assertNoErrors(doFsck(newconf, false));
-      
+
       // We should pass without triggering a RejectedExecutionException
     } finally {
       deleteTable(table);
-    }    
+    }
   }
 
   /**
@@ -1174,16 +1173,11 @@ public class TestHBaseFsck {
         Bytes.toBytes("B"), Bytes.toBytes("BM"));
       HRegionInfo b = new HRegionInfo(tbl.getTableName(),
         Bytes.toBytes("BM"), Bytes.toBytes("C"));
-      Put p = new Put(hri.getRegionName());
+
       hri.setOffline(true);
       hri.setSplit(true);
-      p.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
-        Writables.getBytes(hri));
-      p.add(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER,
-        Writables.getBytes(a));
-      p.add(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER,
-        Writables.getBytes(b));
-      meta.put(p);
+
+      MetaEditor.addRegionToMeta(meta, hri, a, b);
       meta.flushCommits();
       TEST_UTIL.getHBaseAdmin().flush(HConstants.META_TABLE_NAME);
 
@@ -1255,7 +1249,7 @@ public class TestHBaseFsck {
       deleteTable(table);
     }
   }
-  
+
   /**
    * This creates and fixes a bad table with missing last region -- hole in meta and data missing in
   * the fs.
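
MetaEditor.addRegionToMeta covers both shapes used in this test: a plain region row, and a split parent written together with its SPLITA/SPLITB daughters. A condensed sketch, assuming an open .META. HTable (class name, table name and key boundaries are placeholders):

  // Illustrative sketch only; class and method names are not part of the commit.
  import java.io.IOException;

  import org.apache.hadoop.hbase.HRegionInfo;
  import org.apache.hadoop.hbase.catalog.MetaEditor;
  import org.apache.hadoop.hbase.client.HTable;
  import org.apache.hadoop.hbase.util.Bytes;

  class AddRegionSketch {
    /** Inserts one plain region row, then one split-parent row with two daughters. */
    static void addRows(final HTable meta, final byte[] tableName) throws IOException {
      HRegionInfo plain = new HRegionInfo(tableName, Bytes.toBytes("A"), Bytes.toBytes("B"));
      MetaEditor.addRegionToMeta(meta, plain);

      HRegionInfo parent = new HRegionInfo(tableName, Bytes.toBytes("B"), Bytes.toBytes("C"));
      parent.setOffline(true);
      parent.setSplit(true);
      HRegionInfo a = new HRegionInfo(tableName, Bytes.toBytes("B"), Bytes.toBytes("BM"));
      HRegionInfo b = new HRegionInfo(tableName, Bytes.toBytes("BM"), Bytes.toBytes("C"));
      // One call writes info:regioninfo plus info:splitA and info:splitB.
      MetaEditor.addRegionToMeta(meta, parent, a, b);
    }
  }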

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRebuildTestCore.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRebuildTestCore.java?rev=1377965&r1=1377964&r2=1377965&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRebuildTestCore.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRebuildTestCore.java Tue Aug 28 03:40:47 2012
@@ -31,7 +31,14 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HServerAddress;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.LargeTests;
+import org.apache.hadoop.hbase.catalog.MetaEditor;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HConnectionManager;
@@ -42,7 +49,6 @@ import org.apache.hadoop.hbase.client.Re
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Writables;
 import org.apache.zookeeper.KeeperException;
 import org.junit.After;
 import org.junit.Before;
@@ -52,10 +58,10 @@ import org.junit.experimental.categories
  * This testing base class creates a minicluster and a testing table, and
  * shuts down the cluster afterwards. It also provides methods to wipe out
  * meta and to inject errors into meta and the file system.
- * 
+ *
  * Tests should generally break stuff, then attempt to rebuild the meta table
  * offline, then restart hbase, and finally perform checks.
- * 
+ *
  * NOTE: This is a slow set of tests; each takes ~30s and needs to run on a
  * relatively beefy machine. It seems necessary to have each test in a new jvm
  * since minicluster startup and tear downs seem to leak file handles and
@@ -107,7 +113,7 @@ public class OfflineMetaRebuildTestCore 
 
   /**
    * Setup a clean table before we start mucking with it.
-   * 
+   *
    * @throws IOException
    * @throws InterruptedException
    * @throws KeeperException
@@ -142,7 +148,7 @@ public class OfflineMetaRebuildTestCore 
 
   /**
    * delete table in preparation for next test
-   * 
+   *
    * @param tablename
    * @throws IOException
    */
@@ -211,11 +217,8 @@ public class OfflineMetaRebuildTestCore 
     out.close();
 
     // add to meta.
-    Put put = new Put(hri.getRegionName());
-    put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
-        Writables.getBytes(hri));
-    meta.put(put);
-    meta.flushCommits();
+    MetaEditor.addRegionToMeta(meta, hri);
+    meta.close();
     return hri;
   }
 
@@ -240,7 +243,7 @@ public class OfflineMetaRebuildTestCore 
   /**
    * Returns the number of rows in a given table. HBase must be up and the table
    * should be present (otherwise it will wait a while, up to a timeout)
-   * 
+   *
    * @return # of rows in the specified table
    */
   protected int tableRowCount(Configuration conf, String table)
@@ -259,7 +262,7 @@ public class OfflineMetaRebuildTestCore 
 
   /**
    * Dumps .META. table info
-   * 
+   *
    * @return # of entries in meta.
    */
   protected int scanMeta() throws IOException {


