hbase-commits mailing list archives

From: nspiegelb...@apache.org
Subject: svn commit: r1176171 [7/9] - in /hbase/branches/0.89: ./ bin/ src/ src/assembly/ src/docs/src/documentation/content/xdocs/ src/main/java/org/apache/hadoop/hbase/ src/main/java/org/apache/hadoop/hbase/client/ src/main/java/org/apache/hadoop/hbase/filter...
Date: Tue, 27 Sep 2011 02:41:20 GMT
Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java?rev=1176171&r1=1176170&r2=1176171&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java Tue Sep 27 02:41:16 2011
@@ -19,28 +19,40 @@
  */
 package org.apache.hadoop.hbase.mapreduce;
 
-import java.io.DataInput;
-import java.io.DataOutput;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotSame;
+import static org.junit.Assert.assertTrue;
+
 import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
+import java.util.Arrays;
 import java.util.Random;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.hbase.HBaseTestCase;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.PerformanceEvaluation;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.mapreduce.InputFormat;
-import org.apache.hadoop.mapreduce.InputSplit;
+import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.mapreduce.Job;
-import org.apache.hadoop.mapreduce.JobContext;
 import org.apache.hadoop.mapreduce.Mapper;
-import org.apache.hadoop.mapreduce.RecordReader;
+import org.apache.hadoop.mapreduce.RecordWriter;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.hadoop.mapreduce.TaskAttemptID;
 import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mockito;
 
 /**
  * Simple test for {@link KeyValueSortReducer} and {@link HFileOutputFormat}.
@@ -49,144 +61,302 @@ import org.apache.hadoop.mapreduce.lib.o
  * emits keys and values like those of {@link PerformanceEvaluation}.  Makes
  * as many splits as "mapred.map.tasks" maps.
  */
-public class TestHFileOutputFormat extends HBaseTestCase {
+public class TestHFileOutputFormat  {
   private final static int ROWSPERSPLIT = 1024;
 
-  /*
-   * InputFormat that makes keys and values like those used in
-   * PerformanceEvaluation.  Makes as many splits as there are configured
-   * maps ("mapred.map.tasks").
-   */
-  static class PEInputFormat extends InputFormat<ImmutableBytesWritable, ImmutableBytesWritable> {
-    /* Split that holds nothing but split index.
-     */
-    static class PEInputSplit extends InputSplit implements Writable {
-      private int index = -1;
+  private static final byte[] FAMILY_NAME = PerformanceEvaluation.FAMILY_NAME;
+  private static final byte[] TABLE_NAME = Bytes.toBytes("TestTable");
 
-      PEInputSplit() {
-        super();
-      }
+  private HBaseTestingUtility util = new HBaseTestingUtility();
 
-      PEInputSplit(final int i) {
-        this.index = i;
-      }
+  private static Log LOG = LogFactory.getLog(TestHFileOutputFormat.class);
 
-      int getIndex() {
-        return this.index;
-      }
-
-      public long getLength() throws IOException, InterruptedException {
-        return ROWSPERSPLIT;
-      }
+  /**
+   * Simple mapper that makes KeyValue output.
+   */
+  static class RandomKVGeneratingMapper
+  extends Mapper<NullWritable, NullWritable,
+                 ImmutableBytesWritable, KeyValue> {
+
+    private int keyLength;
+    private static final int KEYLEN_DEFAULT=10;
+    private static final String KEYLEN_CONF="randomkv.key.length";
+
+    private int valLength;
+    private static final int VALLEN_DEFAULT=10;
+    private static final String VALLEN_CONF="randomkv.val.length";
 
-      public String [] getLocations() throws IOException, InterruptedException {
-        return new String [] {};
-      }
+    @Override
+    protected void setup(Context context) throws IOException,
+        InterruptedException {
+      super.setup(context);
 
-      public void readFields(DataInput in) throws IOException {
-        this.index = in.readInt();
-      }
+      Configuration conf = context.getConfiguration();
+      keyLength = conf.getInt(KEYLEN_CONF, KEYLEN_DEFAULT);
+      valLength = conf.getInt(VALLEN_CONF, VALLEN_DEFAULT);
+    }
 
-      public void write(DataOutput out) throws IOException {
-        out.writeInt(this.index);
+    protected void map(
+        NullWritable n1, NullWritable n2,
+        Mapper<NullWritable, NullWritable,
+               ImmutableBytesWritable,KeyValue>.Context context)
+        throws java.io.IOException ,InterruptedException
+    {
+
+      byte keyBytes[] = new byte[keyLength];
+      byte valBytes[] = new byte[valLength];
+
+      Random random = new Random(System.currentTimeMillis());
+      for (int i = 0; i < ROWSPERSPLIT; i++) {
+
+        random.nextBytes(keyBytes);
+        random.nextBytes(valBytes);
+        ImmutableBytesWritable key = new ImmutableBytesWritable(keyBytes);
+
+        KeyValue kv = new KeyValue(keyBytes, PerformanceEvaluation.FAMILY_NAME,
+            PerformanceEvaluation.QUALIFIER_NAME, valBytes);
+        context.write(key, kv);
       }
     }
+  }
 
-    public RecordReader<ImmutableBytesWritable, ImmutableBytesWritable> createRecordReader(
-        InputSplit split, TaskAttemptContext context) throws IOException,
-        InterruptedException {
-      final int startrow = ((PEInputSplit)split).getIndex() * ROWSPERSPLIT;
-      return new RecordReader<ImmutableBytesWritable, ImmutableBytesWritable>() {
-        // Starts at a particular row
-        private int counter = startrow;
-        private ImmutableBytesWritable key;
-        private ImmutableBytesWritable value;
-        private final Random random = new Random(System.currentTimeMillis());
-
-        public void close() throws IOException {
-          // Nothing to do.
-        }
-
-        public ImmutableBytesWritable getCurrentKey()
-        throws IOException, InterruptedException {
-          return this.key;
-        }
-
-        public ImmutableBytesWritable getCurrentValue()
-        throws IOException, InterruptedException {
-          return this.value;
-        }
-
-        public float getProgress() throws IOException, InterruptedException {
-          return ((float)(ROWSPERSPLIT - this.counter) / (float)this.counter);
-        }
-
-        public void initialize(InputSplit arg0, TaskAttemptContext arg1)
-            throws IOException, InterruptedException {
-          // Nothing to do.
-
-        }
+  @Before
+  public void cleanupDir() throws IOException {
+    util.cleanupTestDir();
+  }
 
-        public boolean nextKeyValue() throws IOException, InterruptedException {
-          if (this.counter - startrow > ROWSPERSPLIT) return false;
-          this.counter++;
-          this.key = new ImmutableBytesWritable(PerformanceEvaluation.format(this.counter));
-          this.value = new ImmutableBytesWritable(PerformanceEvaluation.generateValue(this.random));
-          return true;
-        }
-      };
-    }
 
-    public List<InputSplit> getSplits(JobContext context)
-    throws IOException, InterruptedException {
-      int count = context.getConfiguration().getInt("mapred.map.tasks", 1);
-      List<InputSplit> splits = new ArrayList<InputSplit>(count);
-      for (int i = 0; i < count; i++) {
-        splits.add(new PEInputSplit(i));
-      }
-      return splits;
-    }
+  private void setupRandomGeneratorMapper(Job job) {
+    job.setInputFormatClass(NMapInputFormat.class);
+    job.setMapperClass(RandomKVGeneratingMapper.class);
+    job.setMapOutputKeyClass(ImmutableBytesWritable.class);
+    job.setMapOutputValueClass(KeyValue.class);
   }
 
   /**
-   * Simple mapper that makes KeyValue output.
+   * Test that {@link HFileOutputFormat} RecordWriter amends timestamps if
+   * passed a keyvalue whose timestamp is {@link HConstants#LATEST_TIMESTAMP}.
+   * @see <a href="https://issues.apache.org/jira/browse/HBASE-2615">HBASE-2615</a>
    */
-  static class PEtoKVMapper extends Mapper<ImmutableBytesWritable, ImmutableBytesWritable, ImmutableBytesWritable, KeyValue> {
-    protected void map(ImmutableBytesWritable key, ImmutableBytesWritable value,
-      org.apache.hadoop.mapreduce.Mapper<ImmutableBytesWritable,ImmutableBytesWritable,ImmutableBytesWritable,KeyValue>.Context context)
-    throws java.io.IOException ,InterruptedException {
-      context.write(key, new KeyValue(key.get(), PerformanceEvaluation.FAMILY_NAME,
-        PerformanceEvaluation.QUALIFIER_NAME, value.get()));
+  @Test
+  public void test_LATEST_TIMESTAMP_isReplaced()
+  throws IOException, InterruptedException {
+    Configuration conf = new Configuration(this.util.getConfiguration());
+    RecordWriter<ImmutableBytesWritable, KeyValue> writer = null;
+    TaskAttemptContext context = null;
+    Path dir =
+      HBaseTestingUtility.getTestDir("test_LATEST_TIMESTAMP_isReplaced");
+    try {
+      Job job = new Job(conf);
+      FileOutputFormat.setOutputPath(job, dir);
+      context = new TaskAttemptContext(job.getConfiguration(),
+        new TaskAttemptID());
+      HFileOutputFormat hof = new HFileOutputFormat();
+      writer = hof.getRecordWriter(context);
+      final byte [] b = Bytes.toBytes("b");
+
+      // Test 1.  Pass a KV that has a ts of LATEST_TIMESTAMP.  It should be
+      // changed by the call to write.  Check that everything in the kv stays the same except the ts.
+      KeyValue kv = new KeyValue(b, b, b);
+      KeyValue original = kv.clone();
+      writer.write(new ImmutableBytesWritable(), kv);
+      assertFalse(original.equals(kv));
+      assertTrue(Bytes.equals(original.getRow(), kv.getRow()));
+      assertTrue(original.matchingColumn(kv.getFamily(), kv.getQualifier()));
+      assertNotSame(original.getTimestamp(), kv.getTimestamp());
+      assertNotSame(HConstants.LATEST_TIMESTAMP, kv.getTimestamp());
+
+      // Test 2. Now test passing a kv that has an explicit ts.  It should not be
+      // changed by the call to the record writer.
+      kv = new KeyValue(b, b, b, kv.getTimestamp() - 1, b);
+      original = kv.clone();
+      writer.write(new ImmutableBytesWritable(), kv);
+      assertTrue(original.equals(kv));
+    } finally {
+      if (writer != null && context != null) writer.close(context);
+      dir.getFileSystem(conf).delete(dir, true);
     }
   }
 
   /**
    * Run small MR job.
    */
+  @Test
   public void testWritingPEData() throws Exception {
+    Configuration conf = util.getConfiguration();
+    Path testDir = HBaseTestingUtility.getTestDir("testWritingPEData");
+    FileSystem fs = testDir.getFileSystem(conf);
+
     // Set down this value or we OOME in eclipse.
-    this.conf.setInt("io.sort.mb", 20);
+    conf.setInt("io.sort.mb", 20);
     // Write a few files.
-    this.conf.setLong("hbase.hregion.max.filesize", 64 * 1024);
-    Job job = new Job(this.conf, getName());
-    job.setInputFormatClass(TestHFileOutputFormat.PEInputFormat.class);
-    job.setMapperClass(TestHFileOutputFormat.PEtoKVMapper.class);
-    job.setMapOutputKeyClass(ImmutableBytesWritable.class);
-    job.setMapOutputValueClass(KeyValue.class);
+    conf.setLong("hbase.hregion.max.filesize", 64 * 1024);
+
+    Job job = new Job(conf, "testWritingPEData");
+    setupRandomGeneratorMapper(job);
     // This partitioner doesn't work well for number keys but using it anyways
     // just to demonstrate how to configure it.
+    byte[] startKey = new byte[RandomKVGeneratingMapper.KEYLEN_DEFAULT];
+    byte[] endKey = new byte[RandomKVGeneratingMapper.KEYLEN_DEFAULT];
+
+    Arrays.fill(startKey, (byte)0);
+    Arrays.fill(endKey, (byte)0xff);
+
     job.setPartitionerClass(SimpleTotalOrderPartitioner.class);
     // Set start and end rows for partitioner.
-    job.getConfiguration().set(SimpleTotalOrderPartitioner.START,
-      Bytes.toString(PerformanceEvaluation.format(0)));
-    int rows = this.conf.getInt("mapred.map.tasks", 1) * ROWSPERSPLIT;
-    job.getConfiguration().set(SimpleTotalOrderPartitioner.END,
-      Bytes.toString(PerformanceEvaluation.format(rows)));
+    SimpleTotalOrderPartitioner.setStartKey(job.getConfiguration(), startKey);
+    SimpleTotalOrderPartitioner.setEndKey(job.getConfiguration(), endKey);
     job.setReducerClass(KeyValueSortReducer.class);
     job.setOutputFormatClass(HFileOutputFormat.class);
-    FileOutputFormat.setOutputPath(job, this.testDir);
+    job.setNumReduceTasks(4);
+
+    FileOutputFormat.setOutputPath(job, testDir);
     assertTrue(job.waitForCompletion(false));
-    FileStatus [] files = this.fs.listStatus(this.testDir);
+    FileStatus [] files = fs.listStatus(testDir);
     assertTrue(files.length > 0);
   }
-}
\ No newline at end of file
+
+  @Test
+  public void testJobConfiguration() throws Exception {
+    Job job = new Job();
+    HTable table = Mockito.mock(HTable.class);
+    byte[][] mockKeys = new byte[][] {
+        HConstants.EMPTY_BYTE_ARRAY,
+        Bytes.toBytes("aaa"),
+        Bytes.toBytes("ggg"),
+        Bytes.toBytes("zzz")
+    };
+    Mockito.doReturn(mockKeys).when(table).getStartKeys();
+
+    HFileOutputFormat.configureIncrementalLoad(job, table);
+    assertEquals(job.getNumReduceTasks(), 4);
+  }
+
+  private byte [][] generateRandomStartKeys(int numKeys) {
+    Random random = new Random();
+    byte[][] ret = new byte[numKeys][];
+    // first region start key is always empty
+    ret[0] = HConstants.EMPTY_BYTE_ARRAY;
+    for (int i = 1; i < numKeys; i++) {
+      ret[i] = PerformanceEvaluation.generateValue(random);
+    }
+    return ret;
+  }
+
+  @Test
+  public void testMRIncrementalLoad() throws Exception {
+    doIncrementalLoadTest(false);
+  }
+
+  @Test
+  public void testMRIncrementalLoadWithSplit() throws Exception {
+    doIncrementalLoadTest(true);
+  }
+
+  private void doIncrementalLoadTest(
+      boolean shouldChangeRegions) throws Exception {
+    Configuration conf = util.getConfiguration();
+    Path testDir = HBaseTestingUtility.getTestDir("testLocalMRIncrementalLoad");
+    byte[][] startKeys = generateRandomStartKeys(5);
+
+    try {
+      util.startMiniCluster();
+      HBaseAdmin admin = new HBaseAdmin(conf);
+      HTable table = util.createTable(TABLE_NAME, FAMILY_NAME);
+      int numRegions = util.createMultiRegions(
+          util.getConfiguration(), table, FAMILY_NAME,
+          startKeys);
+      assertEquals("Should make 5 regions",
+          numRegions, 5);
+      assertEquals("Should start with empty table",
+          0, util.countRows(table));
+
+      // Generate the bulk load files
+      util.startMiniMapReduceCluster();
+      runIncrementalPELoad(conf, table, testDir);
+      // This doesn't write into the table, just makes files
+      assertEquals("HFOF should not touch actual table",
+          0, util.countRows(table));
+
+      if (shouldChangeRegions) {
+        LOG.info("Changing regions in table");
+        admin.disableTable(table.getTableName());
+        byte[][] newStartKeys = generateRandomStartKeys(15);
+        util.createMultiRegions(util.getConfiguration(),
+            table, FAMILY_NAME, newStartKeys);
+        admin.enableTable(table.getTableName());
+        while (table.getRegionsInfo().size() != 15 ||
+            !admin.isTableAvailable(table.getTableName())) {
+          Thread.sleep(1000);
+          LOG.info("Waiting for new region assignment to happen");
+        }
+      }
+
+      // Perform the actual load
+      new LoadIncrementalHFiles(conf).doBulkLoad(testDir, table);
+
+      // Ensure data shows up
+      int expectedRows = conf.getInt("mapred.map.tasks", 1) * ROWSPERSPLIT;
+      assertEquals("LoadIncrementalHFiles should put expected data in table",
+          expectedRows, util.countRows(table));
+      String tableDigestBefore = util.checksumRows(table);
+
+      // Cause regions to reopen
+      admin.disableTable(TABLE_NAME);
+      while (table.getRegionsInfo().size() != 0) {
+        Thread.sleep(1000);
+        LOG.info("Waiting for table to disable");
+      }
+      admin.enableTable(TABLE_NAME);
+      util.waitTableAvailable(TABLE_NAME, 30000);
+
+      assertEquals("Data should remain after reopening of regions",
+          tableDigestBefore, util.checksumRows(table));
+    } finally {
+      util.shutdownMiniMapReduceCluster();
+      util.shutdownMiniCluster();
+    }
+  }
+
+
+
+  private void runIncrementalPELoad(
+      Configuration conf, HTable table, Path outDir)
+  throws Exception {
+    Job job = new Job(conf, "testLocalMRIncrementalLoad");
+    setupRandomGeneratorMapper(job);
+    HFileOutputFormat.configureIncrementalLoad(job, table);
+    FileOutputFormat.setOutputPath(job, outDir);
+
+    assertEquals(table.getRegionsInfo().size(),
+        job.getNumReduceTasks());
+
+    assertTrue(job.waitForCompletion(true));
+  }
+
+  public static void main(String args[]) throws Exception {
+    new TestHFileOutputFormat().manualTest(args);
+  }
+
+  public void manualTest(String args[]) throws Exception {
+    Configuration conf = HBaseConfiguration.create();
+    util = new HBaseTestingUtility(conf);
+    if ("newtable".equals(args[0])) {
+      byte[] tname = args[1].getBytes();
+      HTable table = util.createTable(tname, FAMILY_NAME);
+      HBaseAdmin admin = new HBaseAdmin(conf);
+      admin.disableTable(tname);
+      util.createMultiRegions(conf, table, FAMILY_NAME,
+          generateRandomStartKeys(5));
+      admin.enableTable(tname);
+    } else if ("incremental".equals(args[0])) {
+      byte[] tname = args[1].getBytes();
+      HTable table = new HTable(conf, tname);
+      Path outDir = new Path("incremental-out");
+      runIncrementalPELoad(conf, table, outDir);
+    } else {
+      throw new RuntimeException(
+          "usage: TestHFileOutputFormat newtable | incremental");
+    }
+  }
+}

Added: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java?rev=1176171&view=auto
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java (added)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java Tue Sep 27 02:41:16 2011
@@ -0,0 +1,71 @@
+/**
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.mapreduce;
+
+import java.util.ArrayList;
+
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.mapreduce.ImportTsv.TsvParser;
+import org.apache.hadoop.hbase.mapreduce.ImportTsv.TsvParser.BadTsvLineException;
+import org.apache.hadoop.hbase.mapreduce.ImportTsv.TsvParser.ParsedLine;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.Test;
+
+import com.google.common.base.Joiner;
+import com.google.common.base.Splitter;
+import com.google.common.collect.Iterables;
+
+import static org.junit.Assert.*;
+
+public class TestImportTsv {
+  @Test
+  public void testTsvParser() throws BadTsvLineException {
+    TsvParser parser = new TsvParser("col_a,col_b:qual,HBASE_ROW_KEY,col_d", "\t");
+    assertBytesEquals(Bytes.toBytes("col_a"), parser.getFamily(0));
+    assertBytesEquals(HConstants.EMPTY_BYTE_ARRAY, parser.getQualifier(0));
+    assertBytesEquals(Bytes.toBytes("col_b"), parser.getFamily(1));
+    assertBytesEquals(Bytes.toBytes("qual"), parser.getQualifier(1));
+    assertNull(parser.getFamily(2));
+    assertNull(parser.getQualifier(2));
+
+    byte[] line = Bytes.toBytes("val_a\tval_b\tval_c\tval_d");
+    ParsedLine parsed = parser.parse(line, line.length);
+    checkParsing(parsed, Splitter.on("\t").split(Bytes.toString(line)));
+    assertEquals(2, parser.getRowKeyColumnIndex());
+  }
+
+  private void checkParsing(ParsedLine parsed, Iterable<String> expected) {
+    ArrayList<String> parsedCols = new ArrayList<String>();
+    for (int i = 0; i < parsed.getColumnCount(); i++) {
+      parsedCols.add(Bytes.toString(
+          parsed.getLineBytes(),
+          parsed.getColumnOffset(i),
+          parsed.getColumnLength(i)));
+    }
+    if (!Iterables.elementsEqual(parsedCols, expected)) {
+      fail("Expected: " + Joiner.on(",").join(expected) + "\n" +
+          "Got:" + Joiner.on(",").join(parsedCols));
+    }
+  }
+
+  private void assertBytesEquals(byte[] a, byte[] b) {
+    assertEquals(Bytes.toStringBinary(a), Bytes.toStringBinary(b));
+  }
+}

Added: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java?rev=1176171&view=auto
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java (added)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java Tue Sep 27 02:41:16 2011
@@ -0,0 +1,188 @@
+/**
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.mapreduce;
+
+import static org.junit.Assert.assertEquals;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.io.hfile.Compression;
+import org.apache.hadoop.hbase.io.hfile.HFile;
+import org.apache.hadoop.hbase.io.hfile.HFileScanner;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.Test;
+
+import static org.junit.Assert.*;
+
+/**
+ * Test cases for the "load" half of the HFileOutputFormat bulk load
+ * functionality. These tests run faster than the full MR cluster
+ * tests in TestHFileOutputFormat
+ */
+public class TestLoadIncrementalHFiles {
+
+  private static final byte[] TABLE = Bytes.toBytes("mytable");
+  private static final byte[] QUALIFIER = Bytes.toBytes("myqual");
+  private static final byte[] FAMILY = Bytes.toBytes("myfam");
+
+  private static final byte[][] SPLIT_KEYS = new byte[][] {
+    Bytes.toBytes("ddd"),
+    Bytes.toBytes("ppp")
+  };
+
+  public static int BLOCKSIZE = 64*1024;
+  public static String COMPRESSION =
+    Compression.Algorithm.NONE.getName();
+
+  private HBaseTestingUtility util = new HBaseTestingUtility();
+
+  /**
+   * Test case that creates some regions and loads
+   * HFiles that fit snugly inside those regions
+   */
+  @Test
+  public void testSimpleLoad() throws Exception {
+    runTest("testSimpleLoad",
+        new byte[][][] {
+          new byte[][]{ Bytes.toBytes("aaaa"), Bytes.toBytes("cccc") },
+          new byte[][]{ Bytes.toBytes("ddd"), Bytes.toBytes("ooo") },
+    });
+  }
+
+  /**
+   * Test case that creates some regions and loads
+   * HFiles that cross the boundaries of those regions
+   */
+  @Test
+  public void testRegionCrossingLoad() throws Exception {
+    runTest("testRegionCrossingLoad",
+        new byte[][][] {
+          new byte[][]{ Bytes.toBytes("aaaa"), Bytes.toBytes("eee") },
+          new byte[][]{ Bytes.toBytes("fff"), Bytes.toBytes("zzz") },
+    });
+  }
+
+  private void runTest(String testName, byte[][][] hfileRanges)
+  throws Exception {
+    Path dir = HBaseTestingUtility.getTestDir(testName);
+    FileSystem fs = util.getTestFileSystem();
+    dir = dir.makeQualified(fs);
+    Path familyDir = new Path(dir, Bytes.toString(FAMILY));
+
+    int hfileIdx = 0;
+    for (byte[][] range : hfileRanges) {
+      byte[] from = range[0];
+      byte[] to = range[1];
+      createHFile(fs, new Path(familyDir, "hfile_" + hfileIdx++),
+          FAMILY, QUALIFIER, from, to, 1000);
+    }
+    int expectedRows = hfileIdx * 1000;
+
+
+    util.startMiniCluster();
+    try {
+      HBaseAdmin admin = new HBaseAdmin(util.getConfiguration());
+      HTableDescriptor htd = new HTableDescriptor(TABLE);
+      htd.addFamily(new HColumnDescriptor(FAMILY));
+      admin.createTable(htd, SPLIT_KEYS);
+
+      HTable table = new HTable(TABLE);
+      util.waitTableAvailable(TABLE, 30000);
+      LoadIncrementalHFiles loader = new LoadIncrementalHFiles(
+          util.getConfiguration());
+      loader.doBulkLoad(dir, table);
+
+      assertEquals(expectedRows, util.countRows(table));
+    } finally {
+      util.shutdownMiniCluster();
+    }
+  }
+
+  @Test
+  public void testSplitStoreFile() throws IOException {
+    Path dir = HBaseTestingUtility.getTestDir("testSplitHFile");
+    FileSystem fs = util.getTestFileSystem();
+    Path testIn = new Path(dir, "testhfile");
+    HColumnDescriptor familyDesc = new HColumnDescriptor(FAMILY);
+    createHFile(fs, testIn, FAMILY, QUALIFIER,
+        Bytes.toBytes("aaa"), Bytes.toBytes("zzz"), 1000);
+
+    Path bottomOut = new Path(dir, "bottom.out");
+    Path topOut = new Path(dir, "top.out");
+
+    LoadIncrementalHFiles.splitStoreFile(
+        util.getConfiguration(), testIn,
+        familyDesc, Bytes.toBytes("ggg"),
+        bottomOut,
+        topOut);
+
+    int rowCount = verifyHFile(bottomOut);
+    rowCount += verifyHFile(topOut);
+    assertEquals(1000, rowCount);
+  }
+
+  private int verifyHFile(Path p) throws IOException {
+    Configuration conf = util.getConfiguration();
+    HFile.Reader reader = new HFile.Reader(
+        p.getFileSystem(conf), p, null, false);
+    reader.loadFileInfo();
+    HFileScanner scanner = reader.getScanner(false, false);
+    scanner.seekTo();
+    int count = 0;
+    do {
+      count++;
+    } while (scanner.next());
+    assertTrue(count > 0);
+    return count;
+  }
+
+
+  /**
+   * Create an HFile with the given number of rows between a given
+   * start key and end key.
+   * TODO put me in an HFileTestUtil or something?
+   */
+  static void createHFile(
+      FileSystem fs, Path path,
+      byte[] family, byte[] qualifier,
+      byte[] startKey, byte[] endKey, int numRows) throws IOException
+  {
+    HFile.Writer writer = new HFile.Writer(fs, path, BLOCKSIZE, COMPRESSION,
+        KeyValue.KEY_COMPARATOR);
+    try {
+      // subtract 2 since iterateOnSplits doesn't include boundary keys
+      for (byte[] key : Bytes.iterateOnSplits(startKey, endKey, numRows-2)) {
+        KeyValue kv = new KeyValue(key, family, qualifier, key);
+        writer.append(kv);
+      }
+    } finally {
+      writer.close();
+    }
+  }
+}

Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduce.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduce.java?rev=1176171&r1=1176170&r2=1176171&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduce.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduce.java Tue Sep 27 02:41:16 2011
@@ -242,4 +242,17 @@ public class TestTableMapReduce extends 
       scanner.close();
     }
   }
+
+  /**
+   * Test that we add tmpjars correctly including the ZK jar.
+   */
+  public void testAddDependencyJars() throws Exception {
+    Job job = new Job();
+    TableMapReduceUtil.addDependencyJars(job);
+    String tmpjars = job.getConfiguration().get("tmpjars");
+
+    System.err.println("tmpjars: " + tmpjars);
+    assertTrue(tmpjars.contains("zookeeper"));
+    assertTrue(tmpjars.contains("guava"));
+  }
 }

Added: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java?rev=1176171&view=auto
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java (added)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java Tue Sep 27 02:41:16 2011
@@ -0,0 +1,128 @@
+/**
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.HMsg;
+import org.apache.hadoop.hbase.HServerInfo;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HServerAddress;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Pair;
+
+import java.io.IOException;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import static org.junit.Assert.*;
+
+public class TestMaster {
+  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  private static final Log LOG = LogFactory.getLog(TestMaster.class);
+  private static final byte[] TABLENAME = Bytes.toBytes("TestMaster");
+  private static final byte[] FAMILYNAME = Bytes.toBytes("fam");
+
+  @BeforeClass
+  public static void beforeAllTests() throws Exception {
+    // Start a mini cluster with a single regionserver.
+    TEST_UTIL.startMiniCluster(1);
+  }
+
+  @AfterClass
+  public static void afterAllTests() throws IOException {
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  @Test
+  public void testMasterOpsWhileSplitting() throws Exception {
+    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
+    HMaster m = cluster.getMaster();
+    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+
+    TEST_UTIL.createTable(TABLENAME, FAMILYNAME);
+    TEST_UTIL.loadTable(new HTable(TABLENAME), FAMILYNAME);
+
+    CountDownLatch aboutToOpen = new CountDownLatch(1);
+    CountDownLatch proceed = new CountDownLatch(1);
+    RegionOpenListener list = new RegionOpenListener(aboutToOpen, proceed);
+    m.getRegionServerOperationQueue().registerRegionServerOperationListener(list);
+
+    admin.split(TABLENAME);
+    aboutToOpen.await(60, TimeUnit.SECONDS);
+
+    try {
+      m.getTableRegions(TABLENAME);
+      Pair<HRegionInfo,HServerAddress> pair =
+        m.getTableRegionClosest(TABLENAME, Bytes.toBytes("cde"));
+      assertNull(pair);
+      /**
+       * TODO: these methods return null when the regions are not deployed.
+       * These tests should be uncommented after HBASE-2656.
+      assertNotNull(pair);
+      m.getTableRegionFromName(pair.getFirst().getRegionName());
+      */
+    } finally {
+      proceed.countDown();
+    }
+  }
+
+  static class RegionOpenListener implements RegionServerOperationListener {
+    CountDownLatch aboutToOpen, proceed;
+
+    public RegionOpenListener(
+      CountDownLatch aboutToOpen, CountDownLatch proceed)
+    {
+      this.aboutToOpen = aboutToOpen;
+      this.proceed = proceed;
+    }
+
+    @Override
+    public boolean process(HServerInfo serverInfo, HMsg incomingMsg) {
+      if (!incomingMsg.isType(HMsg.Type.MSG_REPORT_OPEN)) {
+        return true;
+      }
+      try {
+        aboutToOpen.countDown();
+        proceed.await(60, TimeUnit.SECONDS);
+      } catch (InterruptedException ie) {
+        throw new RuntimeException(ie);
+      }
+      return true;
+    }
+
+    @Override
+    public boolean process(RegionServerOperation op) throws IOException {
+      return true;
+    }
+
+    @Override
+    public void processed(RegionServerOperation op) {
+    }
+  }
+
+}

Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/master/TestMasterTransitions.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/master/TestMasterTransitions.java?rev=1176171&r1=1176170&r2=1176171&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/master/TestMasterTransitions.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/master/TestMasterTransitions.java Tue Sep 27 02:41:16 2011
@@ -74,6 +74,10 @@ public class TestMasterTransitions {
    */
   @BeforeClass public static void beforeAllTests() throws Exception {
     TEST_UTIL.getConfiguration().setBoolean("dfs.support.append", true);
+    // Parcel out the regions; don't give them out in big lumps.  We've only
+    // a few in this test.  Letting a couple of cycles pass is more realistic
+    // and gives stuff a chance to work.
+    TEST_UTIL.getConfiguration().setInt("hbase.regions.percheckin", 2);
     // Start a cluster of two regionservers.
     TEST_UTIL.startMiniCluster(2);
     // Create a table of three families.  This will assign a region.

Added: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/master/TestMasterWrongRS.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/master/TestMasterWrongRS.java?rev=1176171&view=auto
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/master/TestMasterWrongRS.java (added)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/master/TestMasterWrongRS.java Tue Sep 27 02:41:16 2011
@@ -0,0 +1,75 @@
+/**
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HServerAddress;
+import org.apache.hadoop.hbase.HServerInfo;
+import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.MiniHBaseCluster.MiniHBaseClusterRegionServer;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public class TestMasterWrongRS {
+  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+
+  @BeforeClass
+  public static void beforeAllTests() throws Exception {
+    TEST_UTIL.startMiniCluster(3);
+  }
+
+  @AfterClass
+  public static void afterAllTests() throws IOException {
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  /**
+   * Test when region servers start reporting with the wrong address
+   * or start code. Currently the decision is to shut them down.
+   * See HBASE-2613
+   * @throws Exception
+   */
+  @Test
+  public void testRsReportsWrongServerName() throws Exception {
+    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
+    MiniHBaseClusterRegionServer firstServer =
+      (MiniHBaseClusterRegionServer)cluster.getRegionServer(0);
+    HRegionServer secondServer = cluster.getRegionServer(1);
+    HServerInfo hsi = firstServer.getServerInfo();
+    firstServer.setHServerInfo(new HServerInfo(hsi.getServerAddress(),
+      hsi.getInfoPort(), hsi.getHostname()));
+    // Sleep while the region server pings back
+    Thread.sleep(2000);
+    assertTrue(firstServer.isOnline());
+    assertEquals(2, cluster.getLiveRegionServerThreads().size());
+
+    secondServer.getHServerInfo().setServerAddress(new HServerAddress("0.0.0.0", 60010));
+    Thread.sleep(2000);
+    assertTrue(secondServer.isOnline());
+    assertEquals(1, cluster.getLiveRegionServerThreads().size());
+  }
+}

Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/master/TestOldLogsCleaner.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/master/TestOldLogsCleaner.java?rev=1176171&r1=1176170&r2=1176171&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/master/TestOldLogsCleaner.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/master/TestOldLogsCleaner.java Tue Sep 27 02:41:16 2011
@@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.master;
 
 import static org.junit.Assert.assertEquals;
 
+import org.apache.hadoop.fs.FileStatus;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
@@ -34,6 +35,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.conf.Configuration;
 
+import java.net.URLEncoder;
 import java.util.concurrent.atomic.AtomicBoolean;
 
 public class TestOldLogsCleaner {
@@ -74,32 +76,41 @@ public class TestOldLogsCleaner {
     Configuration c = TEST_UTIL.getConfiguration();
     Path oldLogDir = new Path(TEST_UTIL.getTestDir(),
         HConstants.HREGION_OLDLOGDIR_NAME);
+    String fakeMachineName = URLEncoder.encode("regionserver:60020", "UTF8");
 
     FileSystem fs = FileSystem.get(c);
     AtomicBoolean stop = new AtomicBoolean(false);
     OldLogsCleaner cleaner = new OldLogsCleaner(1000, stop,c, fs, oldLogDir);
 
+    // Create 2 invalid files, 1 "recent" file, 1 very new file and 30 old files
     long now = System.currentTimeMillis();
     fs.delete(oldLogDir, true);
     fs.mkdirs(oldLogDir);
     fs.createNewFile(new Path(oldLogDir, "a"));
-    fs.createNewFile(new Path(oldLogDir, "1.hlog.dat.a"));
-    fs.createNewFile(new Path(oldLogDir, "1.hlog.dat." + now));
-    for(int i = 0; i < 30; i++) {
-      fs.createNewFile(new Path(oldLogDir, 1 + "hlog.dat." +
-          (now - 6000000 - i)));
+    fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + "a"));
+    fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + now));
+    System.out.println("Now is: " + now);
+    for (int i = 0; i < 30; i++) {
+      fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + (now - 6000000 - i) ));
     }
-    fs.createNewFile(new Path(oldLogDir, "a.hlog.dat." + (now + 10000)));
+    for (FileStatus stat : fs.listStatus(oldLogDir)) {
+      System.out.println(stat.getPath().toString());
+    }
+
+    fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + (now + 10000) ));
 
     assertEquals(34, fs.listStatus(oldLogDir).length);
 
+    // This will take care of 20 old log files (default max we can delete)
     cleaner.chore();
 
     assertEquals(14, fs.listStatus(oldLogDir).length);
 
+    // We will delete all remaining log files and those that are invalid
     cleaner.chore();
 
-    assertEquals(1, fs.listStatus(oldLogDir).length);
+    // We end up with the current log file and a newer one
+    assertEquals(2, fs.listStatus(oldLogDir).length);
   }
 
 }

Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java?rev=1176171&r1=1176170&r2=1176171&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java Tue Sep 27 02:41:16 2011
@@ -76,7 +76,7 @@ public class TestCompaction extends HBas
     this.r = createNewHRegion(htd, null, null);
     this.compactionDir = HRegion.getCompactionDir(this.r.getBaseDir());
     this.regionCompactionDir = new Path(this.compactionDir,
-        Integer.toString(this.r.getRegionInfo().getEncodedName()));
+                        this.r.getRegionInfo().getEncodedName());
   }
 
   @Override
@@ -207,8 +207,7 @@ public class TestCompaction extends HBas
     // they were deleted.
     int count = 0;
     boolean containsStartRow = false;
-    for (StoreFile f: this.r.stores.get(COLUMN_FAMILY_TEXT).getStorefiles().
-        values()) {
+    for (StoreFile f: this.r.stores.get(COLUMN_FAMILY_TEXT).getStorefiles()) {
       HFileScanner scanner = f.getReader().getScanner(false, false);
       scanner.seekTo();
       do {
@@ -239,7 +238,7 @@ public class TestCompaction extends HBas
   private int count() throws IOException {
     int count = 0;
     for (StoreFile f: this.r.stores.
-        get(COLUMN_FAMILY_TEXT).getStorefiles().values()) {
+        get(COLUMN_FAMILY_TEXT).getStorefiles()) {
       HFileScanner scanner = f.getReader().getScanner(false, false);
       if (!scanner.seekTo()) {
         continue;

Added: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java?rev=1176171&view=auto
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java (added)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java Tue Sep 27 02:41:16 2011
@@ -0,0 +1,233 @@
+/**
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.lang.ref.SoftReference;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FilterFileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PositionedReadable;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.io.hfile.HFile;
+import org.apache.hadoop.hbase.io.hfile.HFileScanner;
+import org.apache.hadoop.hbase.regionserver.StoreFile.BloomType;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.Test;
+
+
+/**
+ * Test cases that ensure that file system level errors are bubbled up
+ * appropriately to clients, rather than swallowed.
+ */
+public class TestFSErrorsExposed {
+  private static final Log LOG = LogFactory.getLog(TestFSErrorsExposed.class);
+
+  HBaseTestingUtility util = new HBaseTestingUtility();
+
+  /**
+   * Injects errors into the pread calls of an on-disk file, and makes
+   * sure those bubble up to the HFile scanner
+   */
+  @Test
+  public void testHFileScannerThrowsErrors() throws IOException {
+    Path hfilePath = new Path(new Path(
+        HBaseTestingUtility.getTestDir("internalScannerExposesErrors"),
+        "regionname"), "familyname");
+    FaultyFileSystem fs = new FaultyFileSystem(util.getTestFileSystem());
+    StoreFile.Writer writer = StoreFile.createWriter(fs, hfilePath, 2*1024);
+    TestStoreFile.writeStoreFile(
+        writer, Bytes.toBytes("cf"), Bytes.toBytes("qual"));
+
+    StoreFile sf = new StoreFile(fs, writer.getPath(), false,
+        util.getConfiguration(), StoreFile.BloomType.NONE, false);
+    HFile.Reader reader = sf.createReader();
+    HFileScanner scanner = reader.getScanner(false, true);
+
+    FaultyInputStream inStream = fs.inStreams.get(0).get();
+    assertNotNull(inStream);
+
+    scanner.seekTo();
+    // Do at least one successful read
+    assertTrue(scanner.next());
+
+    inStream.startFaults();
+
+    try {
+      int scanned=0;
+      while (scanner.next()) {
+        scanned++;
+      }
+      fail("Scanner didn't throw after faults injected");
+    } catch (IOException ioe) {
+      LOG.info("Got expected exception", ioe);
+      assertTrue(ioe.getMessage().contains("Fault"));
+    }
+    reader.close();
+  }
+
+  /**
+   * Injects errors into the pread calls of an on-disk file, and makes
+   * sure those bubble up to the StoreFileScanner
+   */
+  @Test
+  public void testStoreFileScannerThrowsErrors() throws IOException {
+    Path hfilePath = new Path(new Path(
+        HBaseTestingUtility.getTestDir("internalScannerExposesErrors"),
+        "regionname"), "familyname");
+    FaultyFileSystem fs = new FaultyFileSystem(util.getTestFileSystem());
+    HFile.Writer writer = StoreFile.createWriter(fs, hfilePath, 2 * 1024);
+    TestStoreFile.writeStoreFile(
+        writer, Bytes.toBytes("cf"), Bytes.toBytes("qual"));
+
+    StoreFile sf = new StoreFile(fs, writer.getPath(), false,
+        util.getConfiguration(), BloomType.NONE, false);
+    List<StoreFileScanner> scanners = StoreFileScanner.getScannersForStoreFiles(
+        Collections.singletonList(sf), false, true);
+    KeyValueScanner scanner = scanners.get(0);
+
+    FaultyInputStream inStream = fs.inStreams.get(0).get();
+    assertNotNull(inStream);
+
+    scanner.seek(KeyValue.LOWESTKEY);
+    // Do at least one successful read
+    assertNotNull(scanner.next());
+
+    inStream.startFaults();
+
+    try {
+      int scanned=0;
+      while (scanner.next() != null) {
+        scanned++;
+      }
+      fail("Scanner didn't throw after faults injected");
+    } catch (IOException ioe) {
+      LOG.info("Got expected exception", ioe);
+      assertTrue(ioe.getMessage().contains("Could not iterate"));
+    }
+    scanner.close();
+  }
+
+  /**
+   * Cluster test which starts a region server with a region, then
+   * removes the data from HDFS underneath it, and ensures that
+   * errors are bubbled to the client.
+   */
+  @Test
+  public void testFullSystemBubblesFSErrors() throws Exception {
+    try {
+      util.startMiniCluster(1);
+      byte[] tableName = Bytes.toBytes("table");
+      byte[] fam = Bytes.toBytes("fam");
+
+      HBaseAdmin admin = new HBaseAdmin(util.getConfiguration());
+      HTableDescriptor desc = new HTableDescriptor(tableName);
+      desc.addFamily(new HColumnDescriptor(
+          fam, 1, HColumnDescriptor.DEFAULT_COMPRESSION,
+          false, false, HConstants.FOREVER, "NONE"));
+      admin.createTable(desc);
+
+      HTable table = new HTable(tableName);
+
+      // Load some data
+      util.loadTable(table, fam);
+      table.flushCommits();
+      util.flush();
+      util.countRows(table);
+
+      // Kill the DFS cluster
+      util.getDFSCluster().shutdownDataNodes();
+
+      try {
+        util.countRows(table);
+        fail("Did not fail to count after removing data");
+      } catch (RuntimeException rte) {
+        // We get RTE instead of IOE since java Iterable<?> doesn't throw
+        // IOE
+        LOG.info("Got expected error", rte);
+        assertTrue(rte.getMessage().contains("Could not seek"));
+      }
+
+    } finally {
+      util.shutdownMiniCluster();
+    }
+  }
+
+  static class FaultyFileSystem extends FilterFileSystem {
+    List<SoftReference<FaultyInputStream>> inStreams =
+      new ArrayList<SoftReference<FaultyInputStream>>();
+
+    public FaultyFileSystem(FileSystem testFileSystem) {
+      super(testFileSystem);
+    }
+
+    @Override
+    public FSDataInputStream open(Path p, int bufferSize) throws IOException  {
+      FSDataInputStream orig = fs.open(p, bufferSize);
+      FaultyInputStream faulty = new FaultyInputStream(orig);
+      inStreams.add(new SoftReference<FaultyInputStream>(faulty));
+      return faulty;
+    }
+  }
+
+  static class FaultyInputStream extends FSDataInputStream {
+    boolean faultsStarted = false;
+
+    public FaultyInputStream(InputStream in) throws IOException {
+      super(in);
+    }
+
+    public void startFaults() {
+      faultsStarted = true;
+    }
+
+    public int read(long position, byte[] buffer, int offset, int length)
+      throws IOException {
+      injectFault();
+      return ((PositionedReadable)in).read(position, buffer, offset, length);
+    }
+
+    private void injectFault() throws IOException {
+      if (faultsStarted) {
+        throw new IOException("Fault injected");
+      }
+    }
+  }
+
+
+}

Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java?rev=1176171&r1=1176170&r2=1176171&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java Tue Sep 27 02:41:16 2011
@@ -111,7 +111,7 @@ public class TestGetClosestAtOrBefore ex
     findRow(mr, 'C', 43, 42);
     // Now delete 'C' and make sure I don't get entries from 'B'.
     byte [] firstRowInC = HRegionInfo.createRegionName(Bytes.toBytes("" + 'C'),
-      HConstants.EMPTY_BYTE_ARRAY, HConstants.ZEROES);
+      HConstants.EMPTY_BYTE_ARRAY, HConstants.ZEROES, false);
     Scan scan = new Scan(firstRowInC);
     s = mr.getScanner(scan);
     try {
@@ -150,7 +150,7 @@ public class TestGetClosestAtOrBefore ex
     // Find the row.
     byte [] tofindBytes = Bytes.toBytes((short)rowToFind);
     byte [] metaKey = HRegionInfo.createRegionName(tableb, tofindBytes,
-      HConstants.NINES);
+      HConstants.NINES, false);
     LOG.info("find=" + new String(metaKey));
     Result r = mr.getClosestRowBefore(metaKey);
     if (answer == -1) {

Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java?rev=1176171&r1=1176170&r2=1176171&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java Tue Sep 27 02:41:16 2011
@@ -24,12 +24,12 @@ import org.apache.commons.logging.LogFac
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestCase;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Put;
@@ -45,6 +45,9 @@ import org.apache.hadoop.hbase.filter.Pr
 import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
 import org.apache.hadoop.hbase.regionserver.HRegion.RegionScanner;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper;
+import org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge;
+import org.apache.hadoop.hbase.util.Threads;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -89,6 +92,12 @@ public class TestHRegion extends HBaseTe
     super.setUp();
   }
 
+  @Override
+  protected void tearDown() throws Exception {
+    super.tearDown();
+    EnvironmentEdgeManagerTestHelper.reset();
+  }
+
   //////////////////////////////////////////////////////////////////////////////
   // New tests that doesn't spin up a mini cluster but rather just test the
   // individual code pieces in the HRegion. Putting files locally in
@@ -99,7 +108,7 @@ public class TestHRegion extends HBaseTe
     HBaseConfiguration hc = initSplit();
     int numRows = 100;
     byte [][] families = {fam1, fam2, fam3};
-    
+
     //Setting up region
     String method = this.getName();
     initHRegion(tableName, method, hc, families);
@@ -320,9 +329,9 @@ public class TestHRegion extends HBaseTe
   }
 
   //////////////////////////////////////////////////////////////////////////////
-  // checkAndPut tests
+  // checkAndMutate tests
   //////////////////////////////////////////////////////////////////////////////
-  public void testCheckAndPut_WithEmptyRowValue() throws IOException {
+  public void testCheckAndMutate_WithEmptyRowValue() throws IOException {
     byte [] tableName = Bytes.toBytes("testtable");
     byte [] row1 = Bytes.toBytes("row1");
     byte [] fam1 = Bytes.toBytes("fam1");
@@ -340,22 +349,40 @@ public class TestHRegion extends HBaseTe
     put.add(fam1, qf1, val1);
 
     //checkAndPut with correct value
-    boolean res = region.checkAndPut(row1, fam1, qf1, emptyVal, put, lockId,
+    boolean res = region.checkAndMutate(row1, fam1, qf1, emptyVal, put, lockId,
         true);
     assertTrue(res);
 
     // not empty anymore
-    res = region.checkAndPut(row1, fam1, qf1, emptyVal, put, lockId, true);
+    res = region.checkAndMutate(row1, fam1, qf1, emptyVal, put, lockId, true);
+    assertFalse(res);
+
+    Delete delete = new Delete(row1);
+    delete.deleteColumn(fam1, qf1);
+    res = region.checkAndMutate(row1, fam1, qf1, emptyVal, delete, lockId,
+        true);
     assertFalse(res);
 
     put = new Put(row1);
     put.add(fam1, qf1, val2);
     //checkAndPut with correct value
-    res = region.checkAndPut(row1, fam1, qf1, val1, put, lockId, true);
+    res = region.checkAndMutate(row1, fam1, qf1, val1, put, lockId, true);
+    assertTrue(res);
+
+    //checkAndDelete with correct value
+    delete = new Delete(row1);
+    delete.deleteColumn(fam1, qf1);
+    delete.deleteColumn(fam1, qf1);
+    res = region.checkAndMutate(row1, fam1, qf1, val2, delete, lockId, true);
+    assertTrue(res);
+
+    delete = new Delete(row1);
+    res = region.checkAndMutate(row1, fam1, qf1, emptyVal, delete, lockId,
+        true);
     assertTrue(res);
   }
 
-  public void testCheckAndPut_WithWrongValue() throws IOException{
+  public void testCheckAndMutate_WithWrongValue() throws IOException{
     byte [] tableName = Bytes.toBytes("testtable");
     byte [] row1 = Bytes.toBytes("row1");
     byte [] fam1 = Bytes.toBytes("fam1");
@@ -374,11 +401,17 @@ public class TestHRegion extends HBaseTe
     region.put(put);
 
     //checkAndPut with wrong value
-    boolean res = region.checkAndPut(row1, fam1, qf1, val2, put, lockId, true);
+    boolean res = region.checkAndMutate(row1, fam1, qf1, val2, put, lockId, true);
+    assertEquals(false, res);
+
+    //checkAndDelete with wrong value
+    Delete delete = new Delete(row1);
+    delete.deleteFamily(fam1);
+    res = region.checkAndMutate(row1, fam1, qf1, val2, delete, lockId, true);
     assertEquals(false, res);
   }
 
-  public void testCheckAndPut_WithCorrectValue() throws IOException{
+  public void testCheckAndMutate_WithCorrectValue() throws IOException{
     byte [] tableName = Bytes.toBytes("testtable");
     byte [] row1 = Bytes.toBytes("row1");
     byte [] fam1 = Bytes.toBytes("fam1");
@@ -396,7 +429,13 @@ public class TestHRegion extends HBaseTe
     region.put(put);
 
     //checkAndPut with correct value
-    boolean res = region.checkAndPut(row1, fam1, qf1, val1, put, lockId, true);
+    boolean res = region.checkAndMutate(row1, fam1, qf1, val1, put, lockId, true);
+    assertEquals(true, res);
+
+    //checkAndDelete with correct value
+    Delete delete = new Delete(row1);
+    delete.deleteColumn(fam1, qf1);
+    res = region.checkAndMutate(row1, fam1, qf1, val1, delete, lockId, true);
     assertEquals(true, res);
   }
 
@@ -431,7 +470,7 @@ public class TestHRegion extends HBaseTe
     Store store = region.getStore(fam1);
     store.memstore.kvset.size();
 
-    boolean res = region.checkAndPut(row1, fam1, qf1, val1, put, lockId, true);
+    boolean res = region.checkAndMutate(row1, fam1, qf1, val1, put, lockId, true);
     assertEquals(true, res);
     store.memstore.kvset.size();
 
@@ -448,6 +487,80 @@ public class TestHRegion extends HBaseTe
 
   }
 
+  public void testCheckAndDelete_ThatDeleteWasWritten() throws IOException{
+    byte [] tableName = Bytes.toBytes("testtable");
+    byte [] row1 = Bytes.toBytes("row1");
+    byte [] fam1 = Bytes.toBytes("fam1");
+    byte [] fam2 = Bytes.toBytes("fam2");
+    byte [] qf1  = Bytes.toBytes("qualifier1");
+    byte [] qf2  = Bytes.toBytes("qualifier2");
+    byte [] qf3  = Bytes.toBytes("qualifier3");
+    byte [] val1  = Bytes.toBytes("value1");
+    byte [] val2  = Bytes.toBytes("value2");
+    byte [] val3  = Bytes.toBytes("value3");
+    byte[] emptyVal = new byte[] { };
+    Integer lockId = null;
+
+    byte [][] families = {fam1, fam2};
+
+    //Setting up region
+    String method = this.getName();
+    initHRegion(tableName, method, families);
+
+    //Put content
+    Put put = new Put(row1);
+    put.add(fam1, qf1, val1);
+    region.put(put);
+    Threads.sleep(2);
+
+    put = new Put(row1);
+    put.add(fam1, qf1, val2);
+    put.add(fam2, qf1, val3);
+    put.add(fam2, qf2, val2);
+    put.add(fam2, qf3, val1);
+    put.add(fam1, qf3, val1);
+    region.put(put);
+
+    //Multi-column delete
+    Delete delete = new Delete(row1);
+    delete.deleteColumn(fam1, qf1);
+    delete.deleteColumn(fam2, qf1);
+    delete.deleteColumn(fam1, qf3);
+    boolean res = region.checkAndMutate(row1, fam1, qf1, val2, delete, lockId,
+        true);
+    assertEquals(true, res);
+
+    Get get = new Get(row1);
+    get.addColumn(fam1, qf1);
+    get.addColumn(fam1, qf3);
+    get.addColumn(fam2, qf2);
+    Result r = region.get(get, null);
+    assertEquals(2, r.size());
+    assertTrue(Bytes.equals(val1, r.getValue(fam1, qf1)));
+    assertTrue(Bytes.equals(val2, r.getValue(fam2, qf2)));
+
+    //Family delete
+    delete = new Delete(row1);
+    delete.deleteFamily(fam2);
+    res = region.checkAndMutate(row1, fam2, qf1, emptyVal, delete, lockId,
+        true);
+    assertEquals(true, res);
+
+    get = new Get(row1);
+    r = region.get(get, null);
+    assertEquals(1, r.size());
+    assertTrue(Bytes.equals(val1, r.getValue(fam1, qf1)));
+
+    //Row delete
+    delete = new Delete(row1);
+    res = region.checkAndMutate(row1, fam1, qf1, val1, delete, lockId,
+        true);
+    assertEquals(true, res);
+    get = new Get(row1);
+    r = region.get(get, null);
+    assertEquals(0, r.size());
+  }
+
   //////////////////////////////////////////////////////////////////////////////
   // Delete tests
   //////////////////////////////////////////////////////////////////////////////
@@ -524,6 +637,7 @@ public class TestHRegion extends HBaseTe
     byte [][] families = {fam};
     String method = this.getName();
     initHRegion(tableName, method, families);
+    EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge());
 
     byte [] row = Bytes.toBytes("table_name");
     // column names
@@ -562,9 +676,6 @@ public class TestHRegion extends HBaseTe
     result = region.get(get, null);
     assertEquals(1, result.size());
 
-    // Sleep to ensure timestamp of next Put is bigger than previous delete
-    Thread.sleep(10);
-    
     // Assert that after a delete, I can put.
     put = new Put(row);
     put.add(fam, splitA, Bytes.toBytes("reference_A"));
@@ -577,10 +688,7 @@ public class TestHRegion extends HBaseTe
     delete = new Delete(row);
     region.delete(delete, null, false);
     assertEquals(0, region.get(get, null).size());
-    
-    // Sleep to ensure timestamp of next Put is bigger than previous delete
-    Thread.sleep(10);
-    
+
     region.put(new Put(row).add(fam, splitA, Bytes.toBytes("reference_A")));
     result = region.get(get, null);
     assertEquals(1, result.size());
@@ -676,16 +784,14 @@ public class TestHRegion extends HBaseTe
   public void doTestDelete_AndPostInsert(Delete delete)
       throws IOException, InterruptedException {
     initHRegion(tableName, getName(), fam1);
+    EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge());
     Put put = new Put(row);
     put.add(fam1, qual1, value1);
     region.put(put);
 
-    Thread.sleep(10);
-
     // now delete the value:
     region.delete(delete, null, true);
 
-    Thread.sleep(10);
 
     // ok put data:
     put = new Put(row);
@@ -1118,23 +1224,51 @@ public class TestHRegion extends HBaseTe
 
     Scan scan = null;
     HRegion.RegionScanner is = null;
-    
-    //Testing to see how many scanners that is produced by getScanner, starting 
+
+    //Testing to see how many scanners that is produced by getScanner, starting
     //with known number, 2 - current = 1
     scan = new Scan();
     scan.addFamily(fam2);
     scan.addFamily(fam4);
     is = (RegionScanner) region.getScanner(scan);
-    is.initHeap(); // i dont like this test
     assertEquals(1, ((RegionScanner)is).storeHeap.getHeap().size());
-    
+
     scan = new Scan();
     is = (RegionScanner) region.getScanner(scan);
-    is.initHeap();
-    assertEquals(families.length -1, 
+    assertEquals(families.length -1,
         ((RegionScanner)is).storeHeap.getHeap().size());
   }
 
+  /**
+   * This method tests https://issues.apache.org/jira/browse/HBASE-2516.
+   */
+  public void testGetScanner_WithRegionClosed() {
+    byte[] tableName = Bytes.toBytes("testtable");
+    byte[] fam1 = Bytes.toBytes("fam1");
+    byte[] fam2 = Bytes.toBytes("fam2");
+
+    byte[][] families = {fam1, fam2};
+
+    //Setting up region
+    String method = this.getName();
+    try {
+      initHRegion(tableName, method, families);
+    } catch (IOException e) {
+      e.printStackTrace();
+      fail("Got IOException during initHRegion, " + e.getMessage());
+    }
+    region.closed.set(true);
+    try {
+      region.getScanner(null);
+      fail("Expected to get an exception during getScanner on a region that is closed");
+    } catch (org.apache.hadoop.hbase.NotServingRegionException e) {
+      //this is the correct exception that is expected
+    } catch (IOException e) {
+      fail("Got wrong type of exception - should be a NotServingRegionException, but was an IOException: "
+              + e.getMessage());
+    }
+  }
+
   public void testRegionScanner_Next() throws IOException {
     byte [] tableName = Bytes.toBytes("testtable");
     byte [] row1 = Bytes.toBytes("row1");
@@ -1609,7 +1743,7 @@ public class TestHRegion extends HBaseTe
     assertTrue("ICV failed to upgrade timestamp",
         first.getTimestamp() != second.getTimestamp());
   }
-  
+
   public void testIncrementColumnValue_ConcurrentFlush() throws IOException {
     initHRegion(tableName, getName(), fam1);
 
@@ -2104,6 +2238,8 @@ public class TestHRegion extends HBaseTe
     initHRegion(tableName, method, families);
     PutThread putThread = new PutThread(numRows, families, qualifiers);
     putThread.start();
+    putThread.waitForFirstPut();
+
     FlushThread flushThread = new FlushThread();
     flushThread.start();
 
@@ -2153,6 +2289,8 @@ public class TestHRegion extends HBaseTe
 
   protected class PutThread extends Thread {
     private volatile boolean done;
+    private volatile int numPutsFinished = 0;
+
     private Throwable error = null;
     private int numRows;
     private byte[][] families;
@@ -2165,6 +2303,17 @@ public class TestHRegion extends HBaseTe
       this.qualifiers = qualifiers;
     }
 
+    /**
+     * Block until this thread has put at least one row.
+     */
+    public void waitForFirstPut() throws InterruptedException {
+      // wait until put thread actually puts some data
+      while (numPutsFinished == 0) {
+        checkNoError();
+        Thread.sleep(50);
+      }
+    }
+
     public void done() {
       done = true;
       synchronized (this) {
@@ -2181,7 +2330,6 @@ public class TestHRegion extends HBaseTe
     @Override
     public void run() {
       done = false;
-      int val = 0;
       while (!done) {
         try {
           for (int r = 0; r < numRows; r++) {
@@ -2189,18 +2337,19 @@ public class TestHRegion extends HBaseTe
             Put put = new Put(row);
             for (byte[] family : families) {
               for (byte[] qualifier : qualifiers) {
-                put.add(family, qualifier, (long) val,
-                    Bytes.toBytes(val));
+                put.add(family, qualifier, (long) numPutsFinished,
+                    Bytes.toBytes(numPutsFinished));
               }
             }
 //            System.out.println("Putting of kvsetsize=" + put.size());
             region.put(put);
-            if (val > 0 && val % 47 == 0) {
-              System.out.println("put iteration = " + val);
-              Delete delete = new Delete(row, (long)val-30, null);
+            numPutsFinished++;
+            if (numPutsFinished > 0 && numPutsFinished % 47 == 0) {
+              System.out.println("put iteration = " + numPutsFinished);
+              Delete delete = new Delete(row, (long)numPutsFinished-30, null);
               region.delete(delete, null, true);
             }
-            val++;
+            numPutsFinished++;
           }
         } catch (IOException e) {
           LOG.error("error while putting records", e);
@@ -2244,6 +2393,8 @@ public class TestHRegion extends HBaseTe
     initHRegion(tableName, method, families);
     PutThread putThread = new PutThread(numRows, families, qualifiers);
     putThread.start();
+    putThread.waitForFirstPut();
+
     FlushThread flushThread = new FlushThread();
     flushThread.start();
 
@@ -2292,7 +2443,7 @@ public class TestHRegion extends HBaseTe
     }
 
     putThread.done();
-    
+
     region.flushcache();
 
     putThread.join();
@@ -2348,7 +2499,7 @@ public class TestHRegion extends HBaseTe
   }
 
 
-  
+
   private void putData(int startRow, int numRows, byte [] qf,
       byte [] ...families)
   throws IOException {

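The renames above fold checkAndPut and the new check-and-delete behaviour into a single HRegion.checkAndMutate(row, family, qualifier, expectedValue, mutation, lockId, writeToWAL) entry point that accepts either a Put or a Delete as the mutation. A rough usage sketch against that signature follows; the helper class and its literals are illustrative only, and it assumes an HRegion initialized the way initHRegion does in the tests above.

import java.io.IOException;

import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.util.Bytes;

public class CheckAndMutateSketch {
  // Illustrative helper: guards a Put and then a Delete with the same
  // expected-value check, as the tests above do.
  static void demo(HRegion region) throws IOException {
    byte[] row  = Bytes.toBytes("row1");
    byte[] fam  = Bytes.toBytes("fam1");
    byte[] qual = Bytes.toBytes("qualifier1");
    byte[] val1 = Bytes.toBytes("value1");
    byte[] val2 = Bytes.toBytes("value2");

    // Seed a value so the guarded mutations below have something to match.
    Put seed = new Put(row);
    seed.add(fam, qual, val1);
    region.put(seed);

    // check-and-put: applied only if the current value equals val1.
    Put put = new Put(row);
    put.add(fam, qual, val2);
    boolean putApplied = region.checkAndMutate(row, fam, qual, val1, put, null, true);

    // check-and-delete: same guard, but the mutation is a Delete.
    Delete delete = new Delete(row);
    delete.deleteColumn(fam, qual);
    boolean deleteApplied = region.checkAndMutate(row, fam, qual, val2, delete, null, true);

    System.out.println("put applied=" + putApplied + ", delete applied=" + deleteApplied);
  }
}
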
Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java?rev=1176171&r1=1176170&r2=1176171&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java Tue Sep 27 02:41:16 2011
@@ -19,19 +19,64 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
-import org.apache.hadoop.hbase.HBaseTestCase;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.MD5Hash;
 
-public class TestHRegionInfo extends HBaseTestCase {
+import org.junit.Test;
+import static org.junit.Assert.*;
+
+public class TestHRegionInfo {
+  @Test
   public void testCreateHRegionInfoName() throws Exception {
     String tableName = "tablename";
     final byte [] tn = Bytes.toBytes(tableName);
     String startKey = "startkey";
     final byte [] sk = Bytes.toBytes(startKey);
     String id = "id";
-    byte [] name = HRegionInfo.createRegionName(tn, sk, id);
+
+    // old format region name
+    byte [] name = HRegionInfo.createRegionName(tn, sk, id, false);
     String nameStr = Bytes.toString(name);
-    assertEquals(nameStr, tableName + "," + startKey + "," + id);
+    assertEquals(tableName + "," + startKey + "," + id, nameStr);
+
+
+    // new format region name.
+    String md5HashInHex = MD5Hash.getMD5AsHex(name);
+    assertEquals(HRegionInfo.MD5_HEX_LENGTH, md5HashInHex.length());
+    name = HRegionInfo.createRegionName(tn, sk, id, true);
+    nameStr = Bytes.toString(name);
+    assertEquals(tableName + "," + startKey + ","
+                 + id + "." + md5HashInHex + ".",
+                 nameStr);
+  }
+
+  @Test
+  public void testContainsRange() {
+    HTableDescriptor tableDesc = new HTableDescriptor("testtable");
+    HRegionInfo hri = new HRegionInfo(
+        tableDesc, Bytes.toBytes("a"), Bytes.toBytes("g"));
+    // Single row range at start of region
+    assertTrue(hri.containsRange(Bytes.toBytes("a"), Bytes.toBytes("a")));
+    // Fully contained range
+    assertTrue(hri.containsRange(Bytes.toBytes("b"), Bytes.toBytes("c")));
+    // Range overlapping start of region
+    assertTrue(hri.containsRange(Bytes.toBytes("a"), Bytes.toBytes("c")));
+    // Fully contained single-row range
+    assertTrue(hri.containsRange(Bytes.toBytes("c"), Bytes.toBytes("c")));
+    // Range that overlaps end key and hence doesn't fit
+    assertFalse(hri.containsRange(Bytes.toBytes("a"), Bytes.toBytes("g")));
+    // Single row range on end key
+    assertFalse(hri.containsRange(Bytes.toBytes("g"), Bytes.toBytes("g")));
+    // Single row range entirely outside
+    assertFalse(hri.containsRange(Bytes.toBytes("z"), Bytes.toBytes("z")));
+
+    // Degenerate range
+    try {
+      hri.containsRange(Bytes.toBytes("z"), Bytes.toBytes("a"));
+      fail("Invalid range did not throw IAE");
+    } catch (IllegalArgumentException iae) {
+    }
   }
 }

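Per the test above, createRegionName now takes a fourth boolean argument: false yields the old "table,startkey,id" name, while true appends "." plus the MD5 hex of the old-format name plus "." as a suffix. A small sketch of both call shapes follows; the region id literal is made up for illustration.

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.MD5Hash;

public class RegionNameFormats {
  public static void main(String[] args) {
    byte[] table    = Bytes.toBytes("tablename");
    byte[] startKey = Bytes.toBytes("startkey");
    String id = "1316054866000";  // hypothetical region id, e.g. a timestamp

    // Old format: tablename,startkey,id
    byte[] oldName = HRegionInfo.createRegionName(table, startKey, id, false);

    // New format: old name plus a trailing ".<md5-of-old-name>." suffix
    // (per the assertions in TestHRegionInfo above).
    byte[] newName = HRegionInfo.createRegionName(table, startKey, id, true);

    System.out.println(Bytes.toString(oldName));
    System.out.println(Bytes.toString(newName));
    System.out.println("md5 = " + MD5Hash.getMD5AsHex(oldName));
  }
}
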
Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeyValueHeap.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeyValueHeap.java?rev=1176171&r1=1176170&r2=1176171&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeyValueHeap.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeyValueHeap.java Tue Sep 27 02:41:16 2011
@@ -20,6 +20,7 @@
 
 package org.apache.hadoop.hbase.regionserver;
 
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Iterator;
@@ -64,7 +65,7 @@ implements HConstants {
     col5 = Bytes.toBytes("col5");
   }
 
-  public void testSorted(){
+  public void testSorted() throws IOException{
     //Cases that need to be checked are:
     //1. The "smallest" KeyValue is in the same scanners as current
     //2. Current scanner gets empty
@@ -126,7 +127,7 @@ implements HConstants {
 
   }
 
-  public void testSeek(){
+  public void testSeek() throws IOException {
     //Cases:
     //1. Seek KeyValue that is not in scanner
     //2. Check that smallest that is returned from a seek is correct
@@ -174,7 +175,7 @@ implements HConstants {
 
   }
 
-  public void testScannerLeak() {
+  public void testScannerLeak() throws IOException {
     // Test for unclosed scanners (HBASE-1927)
 
     List<KeyValue> l1 = new ArrayList<KeyValue>();

Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeyValueScanFixture.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeyValueScanFixture.java?rev=1176171&r1=1176170&r2=1176171&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeyValueScanFixture.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeyValueScanFixture.java Tue Sep 27 02:41:16 2011
@@ -20,6 +20,8 @@
 
 package org.apache.hadoop.hbase.regionserver;
 
+import java.io.IOException;
+
 import junit.framework.TestCase;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValueTestUtil;
@@ -28,7 +30,7 @@ import org.apache.hadoop.hbase.util.Byte
 public class TestKeyValueScanFixture extends TestCase {
 
 
-  public void testKeyValueScanFixture() {
+  public void testKeyValueScanFixture() throws IOException {
     KeyValue kvs[] = new KeyValue[]{
         KeyValueTestUtil.create("RowA", "family", "qf1",
             1, KeyValue.Type.Put, "value-1"),

Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStore.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStore.java?rev=1176171&r1=1176170&r2=1176171&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStore.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStore.java Tue Sep 27 02:41:16 2011
@@ -158,8 +158,9 @@ public class TestMemStore extends TestCa
 
   /**
    * A simple test which verifies the 3 possible states when scanning across snapshot.
+   * @throws IOException
    */
-  public void testScanAcrossSnapshot2() {
+  public void testScanAcrossSnapshot2() throws IOException {
     // we are going to the scanning across snapshot with two kvs
     // kv1 should always be returned before kv2
     final byte[] one = Bytes.toBytes(1);
@@ -188,7 +189,8 @@ public class TestMemStore extends TestCa
     verifyScanAcrossSnapshot2(kv1, kv2);
   }
 
-  private void verifyScanAcrossSnapshot2(KeyValue kv1, KeyValue kv2) {
+  private void verifyScanAcrossSnapshot2(KeyValue kv1, KeyValue kv2)
+      throws IOException {
     ReadWriteConsistencyControl.resetThreadReadPoint(rwcc);
     List<KeyValueScanner> memstorescanners = this.memstore.getScanners();
     assertEquals(1, memstorescanners.size());
@@ -199,7 +201,8 @@ public class TestMemStore extends TestCa
     assertNull(scanner.next());
   }
 
-  private void assertScannerResults(KeyValueScanner scanner, KeyValue[] expected) {
+  private void assertScannerResults(KeyValueScanner scanner, KeyValue[] expected)
+      throws IOException {
     scanner.seek(KeyValue.createFirstOnRow(new byte[]{}));
     for (KeyValue kv : expected) {
       assertTrue(0 ==
@@ -209,7 +212,7 @@ public class TestMemStore extends TestCa
     assertNull(scanner.peek());
   }
 
-  public void testMemstoreConcurrentControl() {
+  public void testMemstoreConcurrentControl() throws IOException {
     final byte[] row = Bytes.toBytes(1);
     final byte[] f = Bytes.toBytes("family");
     final byte[] q1 = Bytes.toBytes("q1");
@@ -250,7 +253,6 @@ public class TestMemStore extends TestCa
   }
 
   private static class ReadOwnWritesTester extends Thread {
-    final int id;
     static final int NUM_TRIES = 1000;
 
     final byte[] row;
@@ -269,7 +271,6 @@ public class TestMemStore extends TestCa
                                ReadWriteConsistencyControl rwcc,
                                AtomicReference<Throwable> caughtException)
     {
-      this.id = id;
       this.rwcc = rwcc;
       this.memstore = memstore;
       this.caughtException = caughtException;
@@ -284,7 +285,7 @@ public class TestMemStore extends TestCa
       }
     }
 
-    private void internalRun() {
+    private void internalRun() throws IOException {
       for (long i = 0; i < NUM_TRIES && caughtException.get() == null; i++) {
         ReadWriteConsistencyControl.WriteEntry w =
           rwcc.beginMemstoreInsert();
@@ -855,7 +856,7 @@ public class TestMemStore extends TestCa
   }
 
 
-  static void doScan(MemStore ms, int iteration) {
+  static void doScan(MemStore ms, int iteration) throws IOException {
     long nanos = System.nanoTime();
     KeyValueScanner s = ms.getScanners().get(0);
     s.seek(KeyValue.createFirstOnRow(new byte[]{}));
@@ -868,7 +869,7 @@ public class TestMemStore extends TestCa
 
   }
 
-  public static void main(String [] args) {
+  public static void main(String [] args) throws IOException {
     ReadWriteConsistencyControl rwcc = new ReadWriteConsistencyControl();
     MemStore ms = new MemStore();
 

Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java?rev=1176171&r1=1176170&r2=1176171&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java Tue Sep 27 02:41:16 2011
@@ -130,7 +130,7 @@ public class TestStore extends TestCase 
     flush(1);
     // Now put in place an empty store file.  Its a little tricky.  Have to
     // do manually with hacked in sequence id.
-    StoreFile f = this.store.getStorefiles().firstEntry().getValue();
+    StoreFile f = this.store.getStorefiles().get(0);
     Path storedir = f.getPath().getParent();
     long seqid = f.getMaxSequenceId();
     HBaseConfiguration c = new HBaseConfiguration();

Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java?rev=1176171&r1=1176170&r2=1176171&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java Tue Sep 27 02:41:16 2011
@@ -21,14 +21,18 @@ package org.apache.hadoop.hbase.regionse
 
 import java.io.IOException;
 import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Comparator;
 import java.util.TreeSet;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestCase;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.io.Reference.Range;
@@ -36,6 +40,12 @@ import org.apache.hadoop.hbase.io.hfile.
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.mockito.Mockito;
+
+import com.google.common.base.Joiner;
+import com.google.common.collect.Collections2;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Lists;
 
 /**
  * Test HStoreFile
@@ -79,22 +89,23 @@ public class TestStoreFile extends HBase
         StoreFile.BloomType.NONE, false));
   }
 
+  private void writeStoreFile(final HFile.Writer writer) throws IOException {
+    writeStoreFile(writer, Bytes.toBytes(getName()), Bytes.toBytes(getName()));
+  }
   /*
    * Writes HStoreKey and ImmutableBytes data to passed writer and
    * then closes it.
    * @param writer
    * @throws IOException
    */
-  private void writeStoreFile(final HFile.Writer writer)
+  public static void writeStoreFile(final HFile.Writer writer, byte[] fam, byte[] qualifier)
   throws IOException {
     long now = System.currentTimeMillis();
-    byte [] fam = Bytes.toBytes(getName());
-    byte [] qf = Bytes.toBytes(getName());
     try {
       for (char d = FIRST_CHAR; d <= LAST_CHAR; d++) {
         for (char e = FIRST_CHAR; e <= LAST_CHAR; e++) {
           byte[] b = new byte[] { (byte) d, (byte) e };
-          writer.append(new KeyValue(b, fam, qf, now, b));
+          writer.append(new KeyValue(b, fam, qualifier, now, b));
         }
       }
     } finally {
@@ -148,14 +159,14 @@ public class TestStoreFile extends HBase
     KeyValue midKV = KeyValue.createKeyValueFromKey(midkey);
     byte [] midRow = midKV.getRow();
     // Create top split.
-    Path topDir = Store.getStoreHomedir(this.testDir, 1,
+    Path topDir = Store.getStoreHomedir(this.testDir, "1",
       Bytes.toBytes(f.getPath().getParent().getName()));
     if (this.fs.exists(topDir)) {
       this.fs.delete(topDir, true);
     }
     Path topPath = StoreFile.split(this.fs, topDir, f, midRow, Range.top);
     // Create bottom split.
-    Path bottomDir = Store.getStoreHomedir(this.testDir, 2,
+    Path bottomDir = Store.getStoreHomedir(this.testDir, "2",
       Bytes.toBytes(f.getPath().getParent().getName()));
     if (this.fs.exists(bottomDir)) {
       this.fs.delete(bottomDir, true);
@@ -308,7 +319,7 @@ public class TestStoreFile extends HBase
   }
   
   private static String ROOT_DIR =
-    System.getProperty("test.build.data", "/tmp/TestStoreFile");
+    HBaseTestingUtility.getTestDir("TestStoreFile").toString();
   private static String localFormatter = "%010d";
   
   public void testBloomFilter() throws Exception {
@@ -440,4 +451,52 @@ public class TestStoreFile extends HBase
     
   }
   
+  public void testFlushTimeComparator() {
+    assertOrdering(StoreFile.Comparators.FLUSH_TIME,
+        mockStoreFile(true, 1000, -1, "/foo/123"),
+        mockStoreFile(true, 1000, -1, "/foo/126"),
+        mockStoreFile(true, 2000, -1, "/foo/126"),
+        mockStoreFile(false, -1, 1, "/foo/1"),
+        mockStoreFile(false, -1, 3, "/foo/2"),
+        mockStoreFile(false, -1, 5, "/foo/2"),
+        mockStoreFile(false, -1, 5, "/foo/3"));
+  }
+
+  /**
+   * Assert that the given comparator orders the given storefiles in the
+   * same way that they're passed.
+   */
+  private void assertOrdering(Comparator<StoreFile> comparator, StoreFile ... sfs) {
+    ArrayList<StoreFile> sorted = Lists.newArrayList(sfs);
+    Collections.shuffle(sorted);
+    Collections.sort(sorted, comparator);
+    LOG.debug("sfs: " + Joiner.on(",").join(sfs));
+    LOG.debug("sorted: " + Joiner.on(",").join(sorted));
+    assertTrue(Iterables.elementsEqual(Arrays.asList(sfs), sorted));
+  }
+
+  /**
+   * Create a mock StoreFile with the given attributes.
+   */
+  private StoreFile mockStoreFile(boolean bulkLoad, long bulkTimestamp,
+      long seqId, String path) {
+    StoreFile mock = Mockito.mock(StoreFile.class);
+    Mockito.doReturn(bulkLoad).when(mock).isBulkLoadResult();
+    Mockito.doReturn(bulkTimestamp).when(mock).getBulkLoadTimestamp();
+    if (bulkLoad) {
+      // Bulk load files will throw if you ask for their sequence ID
+      Mockito.doThrow(new IllegalAccessError("bulk load"))
+        .when(mock).getMaxSequenceId();
+    } else {
+      Mockito.doReturn(seqId).when(mock).getMaxSequenceId();
+    }
+    Mockito.doReturn(new Path(path)).when(mock).getPath();
+    String name = "mock storefile, bulkLoad=" + bulkLoad +
+      " bulkTimestamp=" + bulkTimestamp +
+      " seqId=" + seqId +
+      " path=" + path;
+    Mockito.doReturn(name).when(mock).toString();
+    return mock;
+  }
+
 }


