hbase-commits mailing list archives

From: st...@apache.org
Subject: svn commit: r942186 [16/18] - in /hadoop/hbase/trunk: ./ contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/ core/src/main/java/org/apache/hadoop/hbase/ core/src/main/java/org/apache/hadoop/hbase/client/ core/src/main/java/org/apache/...
Date: Fri, 07 May 2010 19:26:51 GMT
Modified: hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java (original)
+++ hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java Fri May  7 19:26:45 2010
@@ -29,27 +29,27 @@ import junit.framework.TestCase;
 
 /**
  * Tests the concurrent LruBlockCache.<p>
- * 
+ *
  * Tests will ensure it grows and shrinks in size properly,
- * evictions run when they're supposed to and do what they should, 
+ * evictions run when they're supposed to and do what they should,
  * and that cached blocks are accessible when expected to be.
  */
 public class TestLruBlockCache extends TestCase {
-  
+
   public void testBackgroundEvictionThread() throws Exception {
 
     long maxSize = 100000;
     long blockSize = calculateBlockSizeDefault(maxSize, 9); // room for 9, will evict
-    
+
     LruBlockCache cache = new LruBlockCache(maxSize,blockSize);
-    
+
     Block [] blocks = generateFixedBlocks(10, blockSize, "block");
-    
+
     // Add all the blocks
     for(Block block : blocks) {
       cache.cacheBlock(block.blockName, block.buf);
     }
-    
+
     // Let the eviction run
     int n = 0;
     while(cache.getEvictionCount() == 0) {
@@ -58,43 +58,43 @@ public class TestLruBlockCache extends T
       assertTrue(n++ < 2);
     }
     System.out.println("Background Evictions run: " + cache.getEvictionCount());
-    
+
     // A single eviction run should have occurred
     assertEquals(cache.getEvictionCount(), 1);
   }
-  
+
   public void testCacheSimple() throws Exception {
-    
+
     long maxSize = 1000000;
     long blockSize = calculateBlockSizeDefault(maxSize, 101);
-    
+
     LruBlockCache cache = new LruBlockCache(maxSize, blockSize);
 
     Block [] blocks = generateRandomBlocks(100, blockSize);
-    
+
     long expectedCacheSize = cache.heapSize();
-        
+
     // Confirm empty
     for(Block block : blocks) {
       assertTrue(cache.getBlock(block.blockName) == null);
     }
-    
+
     // Add blocks
     for(Block block : blocks) {
       cache.cacheBlock(block.blockName, block.buf);
       expectedCacheSize += block.heapSize();
     }
-    
+
     // Verify correctly calculated cache heap size
     assertEquals(expectedCacheSize, cache.heapSize());
-    
+
     // Check if all blocks are properly cached and retrieved
     for(Block block : blocks) {
       ByteBuffer buf = cache.getBlock(block.blockName);
       assertTrue(buf != null);
       assertEquals(buf.capacity(), block.buf.capacity());
     }
-    
+
     // Re-add same blocks and ensure nothing has changed
     for(Block block : blocks) {
       try {
@@ -104,52 +104,52 @@ public class TestLruBlockCache extends T
         // expected
       }
     }
-    
+
     // Verify correctly calculated cache heap size
     assertEquals(expectedCacheSize, cache.heapSize());
-    
+
     // Check if all blocks are properly cached and retrieved
     for(Block block : blocks) {
       ByteBuffer buf = cache.getBlock(block.blockName);
       assertTrue(buf != null);
       assertEquals(buf.capacity(), block.buf.capacity());
     }
-    
+
     // Expect no evictions
     assertEquals(0, cache.getEvictionCount());
   }
-  
+
   public void testCacheEvictionSimple() throws Exception {
-    
+
     long maxSize = 100000;
     long blockSize = calculateBlockSizeDefault(maxSize, 10);
-        
+
     LruBlockCache cache = new LruBlockCache(maxSize,blockSize,false);
-    
+
     Block [] blocks = generateFixedBlocks(10, blockSize, "block");
-    
+
     long expectedCacheSize = cache.heapSize();
-    
+
     // Add all the blocks
     for(Block block : blocks) {
       cache.cacheBlock(block.blockName, block.buf);
       expectedCacheSize += block.heapSize();
     }
-    
+
     // A single eviction run should have occurred
     assertEquals(1, cache.getEvictionCount());
-    
+
     // Our expected size overruns acceptable limit
-    assertTrue(expectedCacheSize > 
+    assertTrue(expectedCacheSize >
       (maxSize * LruBlockCache.DEFAULT_ACCEPTABLE_FACTOR));
-    
+
     // But the cache did not grow beyond max
     assertTrue(cache.heapSize() < maxSize);
-    
+
     // And is still below the acceptable limit
-    assertTrue(cache.heapSize() < 
+    assertTrue(cache.heapSize() <
         (maxSize * LruBlockCache.DEFAULT_ACCEPTABLE_FACTOR));
-  
+
     // All blocks except block 0 and 1 should be in the cache
     assertTrue(cache.getBlock(blocks[0].blockName) == null);
     assertTrue(cache.getBlock(blocks[1].blockName) == null);
@@ -160,308 +160,308 @@ public class TestLruBlockCache extends T
   }
 
   public void testCacheEvictionTwoPriorities() throws Exception {
-    
+
     long maxSize = 100000;
     long blockSize = calculateBlockSizeDefault(maxSize, 10);
-    
+
     LruBlockCache cache = new LruBlockCache(maxSize,blockSize,false);
-    
+
     Block [] singleBlocks = generateFixedBlocks(5, 10000, "single");
     Block [] multiBlocks = generateFixedBlocks(5, 10000, "multi");
-    
+
     long expectedCacheSize = cache.heapSize();
-    
+
     // Add and get the multi blocks
     for(Block block : multiBlocks) {
       cache.cacheBlock(block.blockName, block.buf);
       expectedCacheSize += block.heapSize();
       assertEquals(cache.getBlock(block.blockName), block.buf);
     }
-    
+
     // Add the single blocks (no get)
     for(Block block : singleBlocks) {
       cache.cacheBlock(block.blockName, block.buf);
       expectedCacheSize += block.heapSize();
     }
-    
+
     // A single eviction run should have occurred
     assertEquals(cache.getEvictionCount(), 1);
-    
+
     // We expect two entries evicted
     assertEquals(cache.getEvictedCount(), 2);
-    
+
     // Our expected size overruns acceptable limit
-    assertTrue(expectedCacheSize > 
+    assertTrue(expectedCacheSize >
       (maxSize * LruBlockCache.DEFAULT_ACCEPTABLE_FACTOR));
-    
+
     // But the cache did not grow beyond max
     assertTrue(cache.heapSize() <= maxSize);
-    
+
     // And is now below the acceptable limit
-    assertTrue(cache.heapSize() <= 
+    assertTrue(cache.heapSize() <=
         (maxSize * LruBlockCache.DEFAULT_ACCEPTABLE_FACTOR));
-  
+
     // We expect fairness across the two priorities.
     // This test makes multi go barely over its limit, in-memory
     // empty, and the rest in single.  Two single evictions and
     // one multi eviction expected.
     assertTrue(cache.getBlock(singleBlocks[0].blockName) == null);
     assertTrue(cache.getBlock(multiBlocks[0].blockName) == null);
-    
+
     // And all others to be cached
     for(int i=1;i<4;i++) {
-      assertEquals(cache.getBlock(singleBlocks[i].blockName), 
+      assertEquals(cache.getBlock(singleBlocks[i].blockName),
           singleBlocks[i].buf);
-      assertEquals(cache.getBlock(multiBlocks[i].blockName), 
+      assertEquals(cache.getBlock(multiBlocks[i].blockName),
           multiBlocks[i].buf);
     }
   }
 
   public void testCacheEvictionThreePriorities() throws Exception {
-    
+
     long maxSize = 100000;
     long blockSize = calculateBlockSize(maxSize, 10);
-    
+
     LruBlockCache cache = new LruBlockCache(maxSize, blockSize, false,
         (int)Math.ceil(1.2*maxSize/blockSize),
-        LruBlockCache.DEFAULT_LOAD_FACTOR, 
+        LruBlockCache.DEFAULT_LOAD_FACTOR,
         LruBlockCache.DEFAULT_CONCURRENCY_LEVEL,
         0.98f, // min
         0.99f, // acceptable
         0.33f, // single
         0.33f, // multi
         0.34f);// memory
-       
-    
+
+
     Block [] singleBlocks = generateFixedBlocks(5, blockSize, "single");
     Block [] multiBlocks = generateFixedBlocks(5, blockSize, "multi");
     Block [] memoryBlocks = generateFixedBlocks(5, blockSize, "memory");
-    
+
     long expectedCacheSize = cache.heapSize();
-    
+
     // Add 3 blocks from each priority
     for(int i=0;i<3;i++) {
-      
+
       // Just add single blocks
       cache.cacheBlock(singleBlocks[i].blockName, singleBlocks[i].buf);
       expectedCacheSize += singleBlocks[i].heapSize();
-      
+
       // Add and get multi blocks
       cache.cacheBlock(multiBlocks[i].blockName, multiBlocks[i].buf);
       expectedCacheSize += multiBlocks[i].heapSize();
       cache.getBlock(multiBlocks[i].blockName);
-      
+
       // Add memory blocks as such
       cache.cacheBlock(memoryBlocks[i].blockName, memoryBlocks[i].buf, true);
       expectedCacheSize += memoryBlocks[i].heapSize();
-      
+
     }
-    
+
     // Do not expect any evictions yet
     assertEquals(0, cache.getEvictionCount());
-    
+
     // Verify cache size
     assertEquals(expectedCacheSize, cache.heapSize());
-    
+
     // Insert a single block, oldest single should be evicted
     cache.cacheBlock(singleBlocks[3].blockName, singleBlocks[3].buf);
-    
+
     // Single eviction, one thing evicted
     assertEquals(1, cache.getEvictionCount());
     assertEquals(1, cache.getEvictedCount());
-    
+
     // Verify oldest single block is the one evicted
     assertEquals(null, cache.getBlock(singleBlocks[0].blockName));
-    
+
     // Change the oldest remaining single block to a multi
     cache.getBlock(singleBlocks[1].blockName);
-    
+
     // Insert another single block
     cache.cacheBlock(singleBlocks[4].blockName, singleBlocks[4].buf);
-    
+
     // Two evictions, two evicted.
     assertEquals(2, cache.getEvictionCount());
     assertEquals(2, cache.getEvictedCount());
-    
+
     // Oldest multi block should be evicted now
     assertEquals(null, cache.getBlock(multiBlocks[0].blockName));
-    
+
     // Insert another memory block
     cache.cacheBlock(memoryBlocks[3].blockName, memoryBlocks[3].buf, true);
-    
+
     // Three evictions, three evicted.
     assertEquals(3, cache.getEvictionCount());
     assertEquals(3, cache.getEvictedCount());
-    
+
     // Oldest memory block should be evicted now
     assertEquals(null, cache.getBlock(memoryBlocks[0].blockName));
-    
+
     // Add a block that is twice as big (should force two evictions)
     Block [] bigBlocks = generateFixedBlocks(3, blockSize*3, "big");
     cache.cacheBlock(bigBlocks[0].blockName, bigBlocks[0].buf);
-    
+
     // Four evictions, six evicted (inserted block 3X size, expect +3 evicted)
     assertEquals(4, cache.getEvictionCount());
     assertEquals(6, cache.getEvictedCount());
-    
+
     // Expect three remaining singles to be evicted
     assertEquals(null, cache.getBlock(singleBlocks[2].blockName));
     assertEquals(null, cache.getBlock(singleBlocks[3].blockName));
     assertEquals(null, cache.getBlock(singleBlocks[4].blockName));
-    
+
     // Make the big block a multi block
     cache.getBlock(bigBlocks[0].blockName);
-    
+
     // Cache another single big block
     cache.cacheBlock(bigBlocks[1].blockName, bigBlocks[1].buf);
-    
+
     // Five evictions, nine evicted (3 new)
     assertEquals(5, cache.getEvictionCount());
     assertEquals(9, cache.getEvictedCount());
-    
+
     // Expect three remaining multis to be evicted
     assertEquals(null, cache.getBlock(singleBlocks[1].blockName));
     assertEquals(null, cache.getBlock(multiBlocks[1].blockName));
     assertEquals(null, cache.getBlock(multiBlocks[2].blockName));
-    
+
     // Cache a big memory block
     cache.cacheBlock(bigBlocks[2].blockName, bigBlocks[2].buf, true);
-    
+
     // Six evictions, twelve evicted (3 new)
     assertEquals(6, cache.getEvictionCount());
     assertEquals(12, cache.getEvictedCount());
-    
+
     // Expect three remaining in-memory to be evicted
     assertEquals(null, cache.getBlock(memoryBlocks[1].blockName));
     assertEquals(null, cache.getBlock(memoryBlocks[2].blockName));
     assertEquals(null, cache.getBlock(memoryBlocks[3].blockName));
-    
-    
+
+
   }
-  
+
   // test scan resistance
   public void testScanResistance() throws Exception {
 
     long maxSize = 100000;
     long blockSize = calculateBlockSize(maxSize, 10);
-    
+
     LruBlockCache cache = new LruBlockCache(maxSize, blockSize, false,
         (int)Math.ceil(1.2*maxSize/blockSize),
-        LruBlockCache.DEFAULT_LOAD_FACTOR, 
+        LruBlockCache.DEFAULT_LOAD_FACTOR,
         LruBlockCache.DEFAULT_CONCURRENCY_LEVEL,
         0.66f, // min
         0.99f, // acceptable
         0.33f, // single
         0.33f, // multi
-        0.34f);// memory  
-      
+        0.34f);// memory
+
     Block [] singleBlocks = generateFixedBlocks(20, blockSize, "single");
     Block [] multiBlocks = generateFixedBlocks(5, blockSize, "multi");
-    
+
     // Add 5 multi blocks
     for(Block block : multiBlocks) {
       cache.cacheBlock(block.blockName, block.buf);
       cache.getBlock(block.blockName);
     }
-    
+
     // Add 5 single blocks
     for(int i=0;i<5;i++) {
       cache.cacheBlock(singleBlocks[i].blockName, singleBlocks[i].buf);
     }
-    
+
     // An eviction ran
     assertEquals(1, cache.getEvictionCount());
-    
+
     // To drop down to 2/3 capacity, we'll need to evict 4 blocks
     assertEquals(4, cache.getEvictedCount());
-    
+
     // Should have been taken off equally from single and multi
     assertEquals(null, cache.getBlock(singleBlocks[0].blockName));
     assertEquals(null, cache.getBlock(singleBlocks[1].blockName));
     assertEquals(null, cache.getBlock(multiBlocks[0].blockName));
     assertEquals(null, cache.getBlock(multiBlocks[1].blockName));
-    
+
     // Let's keep "scanning" by adding single blocks.  From here on we only
     // expect evictions from the single bucket.
-    
+
     // Every time we reach 10 total blocks (every 4 inserts) we get 4 single
     // blocks evicted.  Inserting 13 blocks should yield 3 more evictions and
     // 12 more evicted.
-    
+
     for(int i=5;i<18;i++) {
       cache.cacheBlock(singleBlocks[i].blockName, singleBlocks[i].buf);
     }
-    
+
     // 4 total evictions, 16 total evicted
     assertEquals(4, cache.getEvictionCount());
     assertEquals(16, cache.getEvictedCount());
-    
+
     // Should now have 7 total blocks
     assertEquals(7, cache.size());
 
   }
-  
+
   // test setMaxSize
   public void testResizeBlockCache() throws Exception {
-    
+
     long maxSize = 300000;
     long blockSize = calculateBlockSize(maxSize, 31);
-    
+
     LruBlockCache cache = new LruBlockCache(maxSize, blockSize, false,
         (int)Math.ceil(1.2*maxSize/blockSize),
-        LruBlockCache.DEFAULT_LOAD_FACTOR, 
+        LruBlockCache.DEFAULT_LOAD_FACTOR,
         LruBlockCache.DEFAULT_CONCURRENCY_LEVEL,
         0.98f, // min
         0.99f, // acceptable
         0.33f, // single
         0.33f, // multi
         0.34f);// memory
-       
+
     Block [] singleBlocks = generateFixedBlocks(10, blockSize, "single");
     Block [] multiBlocks = generateFixedBlocks(10, blockSize, "multi");
     Block [] memoryBlocks = generateFixedBlocks(10, blockSize, "memory");
-    
+
     // Add all blocks from all priorities
     for(int i=0;i<10;i++) {
-    
+
       // Just add single blocks
       cache.cacheBlock(singleBlocks[i].blockName, singleBlocks[i].buf);
-      
+
       // Add and get multi blocks
       cache.cacheBlock(multiBlocks[i].blockName, multiBlocks[i].buf);
       cache.getBlock(multiBlocks[i].blockName);
-      
+
       // Add memory blocks as such
       cache.cacheBlock(memoryBlocks[i].blockName, memoryBlocks[i].buf, true);
     }
-    
+
     // Do not expect any evictions yet
     assertEquals(0, cache.getEvictionCount());
-    
+
     // Resize to half capacity plus an extra block (otherwise we evict an extra)
     cache.setMaxSize((long)(maxSize * 0.5f));
-    
+
     // Should have run a single eviction
     assertEquals(1, cache.getEvictionCount());
 
     // And we expect 1/2 of the blocks to be evicted
     assertEquals(15, cache.getEvictedCount());
-    
+
     // And the oldest 5 blocks from each category should be gone
     for(int i=0;i<5;i++) {
       assertEquals(null, cache.getBlock(singleBlocks[i].blockName));
       assertEquals(null, cache.getBlock(multiBlocks[i].blockName));
       assertEquals(null, cache.getBlock(memoryBlocks[i].blockName));
     }
-    
+
     // And the newest 5 blocks should still be accessible
     for(int i=5;i<10;i++) {
       assertEquals(singleBlocks[i].buf, cache.getBlock(singleBlocks[i].blockName));
       assertEquals(multiBlocks[i].buf, cache.getBlock(multiBlocks[i].blockName));
       assertEquals(memoryBlocks[i].buf, cache.getBlock(memoryBlocks[i].blockName));
-    }  
+    }
   }
-  
+
   private Block [] generateFixedBlocks(int numBlocks, int size, String pfx) {
     Block [] blocks = new Block[numBlocks];
     for(int i=0;i<numBlocks;i++) {
@@ -469,11 +469,11 @@ public class TestLruBlockCache extends T
     }
     return blocks;
   }
-  
+
   private Block [] generateFixedBlocks(int numBlocks, long size, String pfx) {
     return generateFixedBlocks(numBlocks, (int)size, pfx);
   }
-  
+
   private Block [] generateRandomBlocks(int numBlocks, long maxSize) {
     Block [] blocks = new Block[numBlocks];
     Random r = new Random();
@@ -482,7 +482,7 @@ public class TestLruBlockCache extends T
     }
     return blocks;
   }
-  
+
   private long calculateBlockSize(long maxSize, int numBlocks) {
     long roughBlockSize = maxSize / numBlocks;
     int numEntries = (int)Math.ceil((1.2)*maxSize/roughBlockSize);
@@ -494,7 +494,7 @@ public class TestLruBlockCache extends T
     negateBlockSize += CachedBlock.PER_BLOCK_OVERHEAD;
     return ClassSize.align((long)Math.floor((roughBlockSize - negateBlockSize)*0.99f));
   }
-  
+
   private long calculateBlockSizeDefault(long maxSize, int numBlocks) {
     long roughBlockSize = maxSize / numBlocks;
     int numEntries = (int)Math.ceil((1.2)*maxSize/roughBlockSize);
@@ -507,7 +507,7 @@ public class TestLruBlockCache extends T
     return ClassSize.align((long)Math.floor((roughBlockSize - negateBlockSize)*
         LruBlockCache.DEFAULT_ACCEPTABLE_FACTOR));
   }
-  
+
   private static class Block implements HeapSize {
     String blockName;
     ByteBuffer buf;
@@ -516,9 +516,9 @@ public class TestLruBlockCache extends T
       this.blockName = blockName;
       this.buf = ByteBuffer.allocate(size);
     }
-    
+
     public long heapSize() {
-      return CachedBlock.PER_BLOCK_OVERHEAD + 
+      return CachedBlock.PER_BLOCK_OVERHEAD +
       ClassSize.align(blockName.length()) +
       ClassSize.align(buf.capacity());
     }
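
The eviction arithmetic asserted above follows from the cache's two watermarks: an eviction run fires once heap size crosses the acceptable factor, and it trims blocks in LRU order until the size drops back under a lower minimum factor. A single-bucket sketch of that accounting (a plain LinkedHashMap stands in for LruBlockCache; the class and its 0.85 minimum are illustrative, and the real cache also balances single/multi/in-memory buckets, which this omits):

import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.Map;

// Toy access-ordered cache illustrating the watermark arithmetic the
// assertions above depend on. Not the HBase API.
public class ToyLruCache {
  private final long maxSize;
  private final float acceptable = 0.99f; // run evictions above this
  private final float min = 0.85f;        // illustrative; trim down to this
  private long currentSize = 0;
  private long evictionRuns = 0;
  private long evictedCount = 0;
  private final LinkedHashMap<String, Long> blocks =
      new LinkedHashMap<String, Long>(16, 0.75f, true); // access order = LRU

  ToyLruCache(long maxSize) { this.maxSize = maxSize; }

  void cacheBlock(String name, long size) {
    blocks.put(name, size);
    currentSize += size;
    if (currentSize > maxSize * acceptable) {
      evictionRuns++;
      Iterator<Map.Entry<String, Long>> it = blocks.entrySet().iterator();
      while (currentSize > maxSize * min && it.hasNext()) {
        currentSize -= it.next().getValue(); // oldest entries go first
        it.remove();
        evictedCount++;
      }
    }
  }

  public static void main(String[] args) {
    ToyLruCache cache = new ToyLruCache(100000);
    for (int i = 0; i < 10; i++) {
      cache.cacheBlock("block" + i, 100000 / 9); // room for ~9 blocks
    }
    System.out.println(cache.evictionRuns + " run(s), "
        + cache.evictedCount + " block(s) evicted");
  }
}

Run as-is, the ten inserts of one-ninth-sized blocks produce exactly one eviction run, mirroring testBackgroundEvictionThread.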

Modified: hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java (original)
+++ hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java Fri May  7 19:26:45 2010
@@ -53,21 +53,21 @@ public class TestSeekTo extends HBaseTes
     reader.loadFileInfo();
     HFileScanner scanner = reader.getScanner(false, true);
     assertEquals(false, scanner.seekBefore(Bytes.toBytes("a")));
-    
+
     assertEquals(false, scanner.seekBefore(Bytes.toBytes("c")));
-    
+
     assertEquals(true, scanner.seekBefore(Bytes.toBytes("d")));
     assertEquals("c", scanner.getKeyString());
-    
+
     assertEquals(true, scanner.seekBefore(Bytes.toBytes("e")));
     assertEquals("c", scanner.getKeyString());
-    
+
     assertEquals(true, scanner.seekBefore(Bytes.toBytes("f")));
     assertEquals("e", scanner.getKeyString());
-    
+
     assertEquals(true, scanner.seekBefore(Bytes.toBytes("g")));
     assertEquals("e", scanner.getKeyString());
-    
+
     assertEquals(true, scanner.seekBefore(Bytes.toBytes("h")));
     assertEquals("g", scanner.getKeyString());
     assertEquals(true, scanner.seekBefore(Bytes.toBytes("i")));
@@ -79,7 +79,7 @@ public class TestSeekTo extends HBaseTes
     assertEquals(true, scanner.seekBefore(Bytes.toBytes("l")));
     assertEquals("k", scanner.getKeyString());
   }
-  
+
   public void testSeekTo() throws Exception {
     Path p = makeNewFile();
     HFile.Reader reader = new HFile.Reader(fs, p, null, false);
@@ -88,18 +88,18 @@ public class TestSeekTo extends HBaseTes
     HFileScanner scanner = reader.getScanner(false, true);
     // lies before the start of the file.
     assertEquals(-1, scanner.seekTo(Bytes.toBytes("a")));
-  
+
     assertEquals(1, scanner.seekTo(Bytes.toBytes("d")));
     assertEquals("c", scanner.getKeyString());
-    
+
     // Across a block boundary now.
     assertEquals(1, scanner.seekTo(Bytes.toBytes("h")));
     assertEquals("g", scanner.getKeyString());
-    
+
     assertEquals(1, scanner.seekTo(Bytes.toBytes("l")));
     assertEquals("k", scanner.getKeyString());
   }
-  
+
   public void testBlockContainingKey() throws Exception {
     Path p = makeNewFile();
     HFile.Reader reader = new HFile.Reader(fs, p, null, false);
@@ -118,6 +118,6 @@ public class TestSeekTo extends HBaseTes
     assertEquals(1, reader.blockIndex.blockContainingKey(Bytes.toBytes("l"), 0, 1));
 
 
-    
+
   }
 }
\ No newline at end of file
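
The return codes asserted above encode the scanner's positioning contract: seekBefore reports whether any key sorts strictly before the given one (landing on it if so), while seekTo returns -1 when the key lies before the file, 1 when it lands on the closest preceding key, and presumably 0 on an exact hit (not exercised here). A sorted-array sketch of that contract, using the keys the assertions imply:

import java.util.Arrays;

// Sorted-key seek semantics matching the return codes asserted above.
public class ToySeek {
  static final String[] KEYS = {"c", "e", "g", "i", "k"};
  static int current = -1; // index the "scanner" is positioned at

  static int seekTo(String key) {
    int idx = Arrays.binarySearch(KEYS, key);
    if (idx >= 0) { current = idx; return 0; }  // exact match (assumed case)
    int insertion = -idx - 1;
    if (insertion == 0) return -1;              // lies before the start of the file
    current = insertion - 1;                    // land on the closest preceding key
    return 1;
  }

  static boolean seekBefore(String key) {
    int idx = Arrays.binarySearch(KEYS, key);
    int insertion = idx >= 0 ? idx : -idx - 1;  // exact hits still go strictly before
    if (insertion == 0) return false;           // no key sorts before this one
    current = insertion - 1;
    return true;
  }

  public static void main(String[] args) {
    System.out.println(seekTo("a"));                               // -1
    System.out.println(seekTo("d") + ", at " + KEYS[current]);     // 1, at c
    System.out.println(seekBefore("f") + ", at " + KEYS[current]); // true, at e
    System.out.println(seekBefore("c"));                           // false, like the assert above
  }
}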

Modified: hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java (original)
+++ hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java Fri May  7 19:26:45 2010
@@ -62,7 +62,7 @@ public class TestHFileOutputFormat exten
      */
     static class PEInputSplit extends InputSplit implements Writable {
       private int index = -1;
-   
+
       PEInputSplit() {
         super();
       }
@@ -97,12 +97,12 @@ public class TestHFileOutputFormat exten
         InterruptedException {
       final int startrow = ((PEInputSplit)split).getIndex() * ROWSPERSPLIT;
       return new RecordReader<ImmutableBytesWritable, ImmutableBytesWritable>() {
-        // Starts at a particular row 
+        // Starts at a particular row
         private int counter = startrow;
         private ImmutableBytesWritable key;
         private ImmutableBytesWritable value;
         private final Random random = new Random(System.currentTimeMillis());
-        
+
         public void close() throws IOException {
           // Nothing to do.
         }
@@ -124,7 +124,7 @@ public class TestHFileOutputFormat exten
         public void initialize(InputSplit arg0, TaskAttemptContext arg1)
             throws IOException, InterruptedException {
           // Nothing to do.
-          
+
         }
 
         public boolean nextKeyValue() throws IOException, InterruptedException {
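
The reader above starts its counter at split index times ROWSPERSPLIT, so every split emits a disjoint, contiguous run of rows. In sketch form (the ROWSPERSPLIT value is assumed; its definition falls outside this hunk):

public class SplitRows {
  static final int ROWSPERSPLIT = 64; // assumed value for illustration

  public static void main(String[] args) {
    for (int index = 0; index < 3; index++) {
      int startrow = index * ROWSPERSPLIT;       // where this split's reader starts counting
      int lastrow = startrow + ROWSPERSPLIT - 1; // last row before the next split takes over
      System.out.println("split " + index + ": rows " + startrow + ".." + lastrow);
    }
  }
}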

Modified: hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScan.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScan.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScan.java (original)
+++ hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScan.java Fri May  7 19:26:45 2010
@@ -51,10 +51,10 @@ import static org.junit.Assert.assertTru
 /**
  * Tests various scan start and stop row scenarios. This is set in a scan and
  * tested in a MapReduce job to see if that is handed over and done properly
- * too. 
+ * too.
  */
 public class TestTableInputFormatScan {
-   
+
   static final Log LOG = LogFactory.getLog(TestTableInputFormatScan.class);
   static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
 
@@ -62,9 +62,9 @@ public class TestTableInputFormatScan {
   static final byte[] INPUT_FAMILY = Bytes.toBytes("contents");
   static final String KEY_STARTROW = "startRow";
   static final String KEY_LASTROW = "stpRow";
-  
+
   private static HTable table = null;
-  
+
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
     // switch TIF to log at DEBUG level
@@ -90,7 +90,7 @@ public class TestTableInputFormatScan {
   public void setUp() throws Exception {
     // nothing
   }
-  
+
   /**
    * @throws java.lang.Exception
    */
@@ -105,229 +105,229 @@ public class TestTableInputFormatScan {
    */
   public static class ScanMapper
   extends TableMapper<ImmutableBytesWritable, ImmutableBytesWritable> {
-  
+
     /**
      * Pass the key and value to reduce.
-     * 
-     * @param key  The key, here "aaa", "aab" etc. 
+     *
+     * @param key  The key, here "aaa", "aab" etc.
      * @param value  The value is the same as the key.
      * @param context  The task context.
      * @throws IOException When reading the rows fails.
      */
     @Override
     public void map(ImmutableBytesWritable key, Result value,
-      Context context) 
+      Context context)
     throws IOException, InterruptedException {
       if (value.size() != 1) {
         throw new IOException("There should only be one input column");
       }
-      Map<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> 
+      Map<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>>
         cf = value.getMap();
       if(!cf.containsKey(INPUT_FAMILY)) {
-        throw new IOException("Wrong input columns. Missing: '" + 
+        throw new IOException("Wrong input columns. Missing: '" +
           Bytes.toString(INPUT_FAMILY) + "'.");
       }
       String val = Bytes.toStringBinary(value.getValue(INPUT_FAMILY, null));
-      LOG.info("map: key -> " + Bytes.toStringBinary(key.get()) + 
+      LOG.info("map: key -> " + Bytes.toStringBinary(key.get()) +
         ", value -> " + val);
       context.write(key, key);
     }
-    
+
   }
-  
+
   /**
    * Checks the last and first key seen against the scanner boundaries.
    */
-  public static class ScanReducer 
-  extends Reducer<ImmutableBytesWritable, ImmutableBytesWritable, 
+  public static class ScanReducer
+  extends Reducer<ImmutableBytesWritable, ImmutableBytesWritable,
                   NullWritable, NullWritable> {
-    
+
     private String first = null;
     private String last = null;
-    
-    protected void reduce(ImmutableBytesWritable key, 
-        Iterable<ImmutableBytesWritable> values, Context context) 
+
+    protected void reduce(ImmutableBytesWritable key,
+        Iterable<ImmutableBytesWritable> values, Context context)
     throws IOException ,InterruptedException {
       int count = 0;
       for (ImmutableBytesWritable value : values) {
         String val = Bytes.toStringBinary(value.get());
-        LOG.info("reduce: key[" + count + "] -> " + 
+        LOG.info("reduce: key[" + count + "] -> " +
           Bytes.toStringBinary(key.get()) + ", value -> " + val);
         if (first == null) first = val;
         last = val;
         count++;
       }
     }
-    
-    protected void cleanup(Context context) 
+
+    protected void cleanup(Context context)
     throws IOException, InterruptedException {
       Configuration c = context.getConfiguration();
-      String startRow = c.get(KEY_STARTROW);    
+      String startRow = c.get(KEY_STARTROW);
       String lastRow = c.get(KEY_LASTROW);
       LOG.info("cleanup: first -> \"" + first + "\", start row -> \"" + startRow + "\"");
       LOG.info("cleanup: last -> \"" + last + "\", last row -> \"" + lastRow + "\"");
-      if (startRow != null && startRow.length() > 0) { 
+      if (startRow != null && startRow.length() > 0) {
         assertEquals(startRow, first);
       }
-      if (lastRow != null && lastRow.length() > 0) { 
+      if (lastRow != null && lastRow.length() > 0) {
         assertEquals(lastRow, last);
       }
     }
-    
+
   }
-  
+
   /**
    * Tests a MR scan using specific start and stop rows.
-   * 
+   *
    * @throws IOException
-   * @throws ClassNotFoundException 
-   * @throws InterruptedException 
+   * @throws ClassNotFoundException
+   * @throws InterruptedException
    */
   @Test
-  public void testScanEmptyToEmpty() 
+  public void testScanEmptyToEmpty()
   throws IOException, InterruptedException, ClassNotFoundException {
     testScan(null, null, null);
   }
 
   /**
    * Tests a MR scan using specific start and stop rows.
-   * 
+   *
    * @throws IOException
-   * @throws ClassNotFoundException 
-   * @throws InterruptedException 
+   * @throws ClassNotFoundException
+   * @throws InterruptedException
    */
   @Test
-  public void testScanEmptyToAPP() 
+  public void testScanEmptyToAPP()
   throws IOException, InterruptedException, ClassNotFoundException {
     testScan(null, "app", "apo");
   }
-  
+
   /**
    * Tests a MR scan using specific start and stop rows.
-   * 
+   *
    * @throws IOException
-   * @throws ClassNotFoundException 
-   * @throws InterruptedException 
+   * @throws ClassNotFoundException
+   * @throws InterruptedException
    */
   @Test
-  public void testScanEmptyToBBA() 
+  public void testScanEmptyToBBA()
   throws IOException, InterruptedException, ClassNotFoundException {
     testScan(null, "bba", "baz");
   }
-  
+
   /**
    * Tests a MR scan using specific start and stop rows.
-   * 
+   *
    * @throws IOException
-   * @throws ClassNotFoundException 
-   * @throws InterruptedException 
+   * @throws ClassNotFoundException
+   * @throws InterruptedException
    */
   @Test
-  public void testScanEmptyToBBB() 
+  public void testScanEmptyToBBB()
   throws IOException, InterruptedException, ClassNotFoundException {
     testScan(null, "bbb", "bba");
   }
-  
+
   /**
    * Tests a MR scan using specific start and stop rows.
-   * 
+   *
    * @throws IOException
-   * @throws ClassNotFoundException 
-   * @throws InterruptedException 
+   * @throws ClassNotFoundException
+   * @throws InterruptedException
    */
   @Test
-  public void testScanEmptyToOPP() 
+  public void testScanEmptyToOPP()
   throws IOException, InterruptedException, ClassNotFoundException {
     testScan(null, "opp", "opo");
   }
-  
+
   /**
    * Tests a MR scan using specific start and stop rows.
-   * 
+   *
    * @throws IOException
-   * @throws ClassNotFoundException 
-   * @throws InterruptedException 
+   * @throws ClassNotFoundException
+   * @throws InterruptedException
    */
   @Test
-  public void testScanOBBToOPP() 
+  public void testScanOBBToOPP()
   throws IOException, InterruptedException, ClassNotFoundException {
     testScan("obb", "opp", "opo");
   }
 
   /**
    * Tests a MR scan using specific start and stop rows.
-   * 
+   *
    * @throws IOException
-   * @throws ClassNotFoundException 
-   * @throws InterruptedException 
+   * @throws ClassNotFoundException
+   * @throws InterruptedException
    */
   @Test
-  public void testScanOBBToQPP() 
+  public void testScanOBBToQPP()
   throws IOException, InterruptedException, ClassNotFoundException {
     testScan("obb", "qpp", "qpo");
   }
-  
+
   /**
    * Tests a MR scan using specific start and stop rows.
-   * 
+   *
    * @throws IOException
-   * @throws ClassNotFoundException 
-   * @throws InterruptedException 
+   * @throws ClassNotFoundException
+   * @throws InterruptedException
    */
   @Test
-  public void testScanOPPToEmpty() 
+  public void testScanOPPToEmpty()
   throws IOException, InterruptedException, ClassNotFoundException {
     testScan("opp", null, "zzz");
   }
-  
+
   /**
    * Tests a MR scan using specific start and stop rows.
-   * 
+   *
    * @throws IOException
-   * @throws ClassNotFoundException 
-   * @throws InterruptedException 
+   * @throws ClassNotFoundException
+   * @throws InterruptedException
    */
   @Test
-  public void testScanYYXToEmpty() 
+  public void testScanYYXToEmpty()
   throws IOException, InterruptedException, ClassNotFoundException {
     testScan("yyx", null, "zzz");
   }
 
   /**
    * Tests a MR scan using specific start and stop rows.
-   * 
+   *
    * @throws IOException
-   * @throws ClassNotFoundException 
-   * @throws InterruptedException 
+   * @throws ClassNotFoundException
+   * @throws InterruptedException
    */
   @Test
-  public void testScanYYYToEmpty() 
+  public void testScanYYYToEmpty()
   throws IOException, InterruptedException, ClassNotFoundException {
     testScan("yyy", null, "zzz");
   }
 
   /**
    * Tests a MR scan using specific start and stop rows.
-   * 
+   *
    * @throws IOException
-   * @throws ClassNotFoundException 
-   * @throws InterruptedException 
+   * @throws ClassNotFoundException
+   * @throws InterruptedException
    */
   @Test
-  public void testScanYZYToEmpty() 
+  public void testScanYZYToEmpty()
   throws IOException, InterruptedException, ClassNotFoundException {
     testScan("yzy", null, "zzz");
   }
 
   /**
    * Tests a MR scan using specific start and stop rows.
-   * 
+   *
    * @throws IOException
-   * @throws ClassNotFoundException 
-   * @throws InterruptedException 
+   * @throws ClassNotFoundException
+   * @throws InterruptedException
    */
   @SuppressWarnings("deprecation")
-  private void testScan(String start, String stop, String last) 
+  private void testScan(String start, String stop, String last)
   throws IOException, InterruptedException, ClassNotFoundException {
     String jobName = "Scan" + (start != null ? start.toUpperCase() : "Empty") +
     "To" + (stop != null ? stop.toUpperCase() : "Empty");
@@ -346,11 +346,11 @@ public class TestTableInputFormatScan {
     LOG.info("scan before: " + scan);
     Job job = new Job(c, jobName);
     TableMapReduceUtil.initTableMapperJob(
-      Bytes.toString(TABLE_NAME), scan, ScanMapper.class, 
+      Bytes.toString(TABLE_NAME), scan, ScanMapper.class,
       ImmutableBytesWritable.class, ImmutableBytesWritable.class, job);
     job.setReducerClass(ScanReducer.class);
     job.setNumReduceTasks(1); // one to get final "first" and "last" key
-    FileOutputFormat.setOutputPath(job, new Path(job.getJobName()));      
+    FileOutputFormat.setOutputPath(job, new Path(job.getJobName()));
     LOG.info("Started " + job.getJobName());
     job.waitForCompletion(true);
     assertTrue(job.isComplete());
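
Each case above pairs a stop row with the last row the scan should actually see, one key earlier, because a Scan's stop row is exclusive. A hypothetical helper that computes that predecessor within the aaa..zzz keyspace the ScanMapper javadoc describes:

public class LastRowBeforeStop {
  // Hypothetical helper, written to match the expected values above.
  static String lastRowBeforeStop(String stop) {
    char[] c = stop.toCharArray();
    int i = c.length - 1;
    while (i >= 0 && c[i] == 'a') { c[i] = 'z'; i--; } // borrow: "bba" -> "baz"
    if (i < 0) return null; // a stop row of "aaa" has no predecessor
    c[i]--;
    return new String(c);
  }

  public static void main(String[] args) {
    System.out.println(lastRowBeforeStop("app")); // apo
    System.out.println(lastRowBeforeStop("bba")); // baz
    System.out.println(lastRowBeforeStop("qpp")); // qpo
  }
}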

Modified: hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduce.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduce.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduce.java (original)
+++ hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduce.java Fri May  7 19:26:45 2010
@@ -50,14 +50,14 @@ import org.apache.hadoop.mapreduce.lib.o
  * a particular cell, and write it back to the table.
  */
 public class TestTableMapReduce extends MultiRegionTable {
-   
+
   private static final Log LOG = LogFactory.getLog(TestTableMapReduce.class);
 
   static final String MULTI_REGION_TABLE_NAME = "mrtest";
   static final byte[] INPUT_FAMILY = Bytes.toBytes("contents");
   static final byte[] OUTPUT_FAMILY = Bytes.toBytes("text");
-  
-  /** constructor */ 
+
+  /** constructor */
   public TestTableMapReduce() {
     super(Bytes.toString(INPUT_FAMILY));
     desc = new HTableDescriptor(MULTI_REGION_TABLE_NAME);
@@ -70,30 +70,30 @@ public class TestTableMapReduce extends 
    */
   public static class ProcessContentsMapper
   extends TableMapper<ImmutableBytesWritable, Put> {
-  
+
     /**
      * Pass the key, and reversed value to reduce
-     * 
-     * @param key 
-     * @param value 
+     *
+     * @param key
+     * @param value
      * @param context
-     * @throws IOException 
+     * @throws IOException
      */
     public void map(ImmutableBytesWritable key, Result value,
-      Context context) 
+      Context context)
     throws IOException, InterruptedException {
       if (value.size() != 1) {
         throw new IOException("There should only be one input column");
       }
-      Map<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> 
+      Map<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>>
         cf = value.getMap();
       if(!cf.containsKey(INPUT_FAMILY)) {
-        throw new IOException("Wrong input columns. Missing: '" + 
+        throw new IOException("Wrong input columns. Missing: '" +
           Bytes.toString(INPUT_FAMILY) + "'.");
       }
 
       // Get the original value and reverse it
-      String originalValue = new String(value.getValue(INPUT_FAMILY, null), 
+      String originalValue = new String(value.getValue(INPUT_FAMILY, null),
         HConstants.UTF8_ENCODING);
       StringBuilder newValue = new StringBuilder(originalValue);
       newValue.reverse();
@@ -103,19 +103,19 @@ public class TestTableMapReduce extends 
       context.write(key, outval);
     }
   }
-  
+
   /**
    * Test a map/reduce against a multi-region table
    * @throws IOException
-   * @throws ClassNotFoundException 
-   * @throws InterruptedException 
+   * @throws ClassNotFoundException
+   * @throws InterruptedException
    */
-  public void testMultiRegionTable() 
+  public void testMultiRegionTable()
   throws IOException, InterruptedException, ClassNotFoundException {
     runTestOnTable(new HTable(conf, MULTI_REGION_TABLE_NAME));
   }
 
-  private void runTestOnTable(HTable table) 
+  private void runTestOnTable(HTable table)
   throws IOException, InterruptedException, ClassNotFoundException {
     MiniMRCluster mrCluster = new MiniMRCluster(2, fs.getUri().toString(), 1);
 
@@ -128,12 +128,12 @@ public class TestTableMapReduce extends 
       scan.addFamily(INPUT_FAMILY);
       TableMapReduceUtil.initTableMapperJob(
         Bytes.toString(table.getTableName()), scan,
-        ProcessContentsMapper.class, ImmutableBytesWritable.class, 
+        ProcessContentsMapper.class, ImmutableBytesWritable.class,
         Put.class, job);
       TableMapReduceUtil.initTableReducerJob(
         Bytes.toString(table.getTableName()),
         IdentityTableReducer.class, job);
-      FileOutputFormat.setOutputPath(job, new Path("test"));      
+      FileOutputFormat.setOutputPath(job, new Path("test"));
       LOG.info("Started " + Bytes.toString(table.getTableName()));
       job.waitForCompletion(true);
       LOG.info("After map/reduce completion");
@@ -177,7 +177,7 @@ public class TestTableMapReduce extends 
   /**
    * Looks at every value of the mapreduce output and verifies that indeed
    * the values have been reversed.
-   * 
+   *
    * @param table Table to scan.
    * @throws IOException
    * @throws NullPointerException if we failed to find a cell value
@@ -210,14 +210,14 @@ public class TestTableMapReduce extends 
             break;
           }
         }
-        
+
         String first = "";
         if (firstValue == null) {
           throw new NullPointerException(Bytes.toString(r.getRow()) +
             ": first value is null");
         }
         first = new String(firstValue, HConstants.UTF8_ENCODING);
-        
+
         String second = "";
         if (secondValue == null) {
           throw new NullPointerException(Bytes.toString(r.getRow()) +
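
ProcessContentsMapper writes back each value reversed, and the verification pass collects the input-family and output-family values per row; the compare itself falls outside this hunk, but presumably asserts the round trip sketched here:

public class ReverseCheck {
  public static void main(String[] args) {
    String first = "contents value"; // what was loaded into INPUT_FAMILY
    String second = new StringBuilder(first).reverse().toString(); // what the mapper wrote
    // Sketch of the compare; the real assertion sits outside this hunk.
    if (!first.equals(new StringBuilder(second).reverse().toString())) {
      throw new AssertionError("output value is not the reverse of the input");
    }
    System.out.println("reversed value: " + second);
  }
}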

Modified: hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTimeRangeMapRed.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTimeRangeMapRed.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTimeRangeMapRed.java (original)
+++ hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTimeRangeMapRed.java Fri May  7 19:26:45 2010
@@ -52,11 +52,11 @@ import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;
 
 public class TestTimeRangeMapRed extends HBaseClusterTestCase {
-  
+
   private final static Log log = LogFactory.getLog(TestTimeRangeMapRed.class);
- 
+
   private static final byte [] KEY = Bytes.toBytes("row1");
-  private static final NavigableMap<Long, Boolean> TIMESTAMP = 
+  private static final NavigableMap<Long, Boolean> TIMESTAMP =
     new TreeMap<Long, Boolean>();
   static {
     TIMESTAMP.put((long)1245620000, false);
@@ -69,22 +69,22 @@ public class TestTimeRangeMapRed extends
   }
   static final long MINSTAMP = 1245620005;
   static final long MAXSTAMP = 1245620100 + 1; // maxStamp itself is excluded. so increment it.
- 
+
   static final byte[] TABLE_NAME = Bytes.toBytes("table123");
   static final byte[] FAMILY_NAME = Bytes.toBytes("text");
   static final byte[] COLUMN_NAME = Bytes.toBytes("input");
- 
+
   protected HTableDescriptor desc;
   protected HTable table;
- 
+
   public TestTimeRangeMapRed() {
     super();
     System.setProperty("hadoop.log.dir", conf.get("hadoop.log.dir"));
     conf.set("mapred.output.dir", conf.get("hadoop.tmp.dir"));
     this.setOpenMetaTable(true);
   }
-  
-  @Override 
+
+  @Override
   public void setUp() throws Exception {
     super.setUp();
     desc = new HTableDescriptor(TABLE_NAME);
@@ -95,14 +95,14 @@ public class TestTimeRangeMapRed extends
     admin.createTable(desc);
     table = new HTable(conf, desc.getName());
   }
-  
-  private static class ProcessTimeRangeMapper  
+
+  private static class ProcessTimeRangeMapper
   extends TableMapper<ImmutableBytesWritable, MapWritable>
   implements Configurable {
-    
+
     private Configuration conf = null;
     private HTable table = null;
-    
+
     @Override
     public void map(ImmutableBytesWritable key, Result result,
         Context context)
@@ -111,7 +111,7 @@ public class TestTimeRangeMapRed extends
       for (KeyValue kv : result.sorted()) {
         tsList.add(kv.getTimestamp());
       }
-      
+
       for (Long ts : tsList) {
         Put put = new Put(key.get());
         put.add(FAMILY_NAME, COLUMN_NAME, ts, Bytes.toBytes(true));
@@ -134,10 +134,10 @@ public class TestTimeRangeMapRed extends
         e.printStackTrace();
       }
     }
-    
+
   }
-  
-  public void testTimeRangeMapRed() 
+
+  public void testTimeRangeMapRed()
   throws IOException, InterruptedException, ClassNotFoundException {
     prepareTest();
     runTestOnTable();
@@ -149,11 +149,11 @@ public class TestTimeRangeMapRed extends
       Put put = new Put(KEY);
       put.add(FAMILY_NAME, COLUMN_NAME, entry.getKey(), Bytes.toBytes(false));
       table.put(put);
-    } 
+    }
     table.flushCommits();
   }
-  
-  private void runTestOnTable() 
+
+  private void runTestOnTable()
   throws IOException, InterruptedException, ClassNotFoundException {
     MiniMRCluster mrCluster = new MiniMRCluster(2, fs.getUri().toString(), 1);
     Job job = null;
@@ -165,7 +165,7 @@ public class TestTimeRangeMapRed extends
       scan.addColumn(FAMILY_NAME, COLUMN_NAME);
       scan.setTimeRange(MINSTAMP, MAXSTAMP);
       scan.setMaxVersions();
-      TableMapReduceUtil.initTableMapperJob(Bytes.toString(TABLE_NAME), 
+      TableMapReduceUtil.initTableMapperJob(Bytes.toString(TABLE_NAME),
         scan, ProcessTimeRangeMapper.class, Text.class, Text.class, job);
       job.waitForCompletion(true);
     } catch (IOException e) {
@@ -177,7 +177,7 @@ public class TestTimeRangeMapRed extends
         FileUtil.fullyDelete(
           new File(job.getConfiguration().get("hadoop.tmp.dir")));
       }
-    } 
+    }
   }
 
   private void verify() throws IOException {
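
The "+ 1" on MAXSTAMP above is needed because Scan.setTimeRange treats the maximum as exclusive, a half-open [minStamp, maxStamp) interval. A quick sketch of which stamps survive the range (sample values assumed; the TIMESTAMP initializer is elided in the hunk above):

public class TimeRangeCheck {
  public static void main(String[] args) {
    final long MINSTAMP = 1245620005L;
    final long MAXSTAMP = 1245620100L + 1; // maxStamp itself is excluded
    long[] stamps = {1245620000L, 1245620005L, 1245620050L, 1245620100L};
    for (long ts : stamps) {
      boolean kept = ts >= MINSTAMP && ts < MAXSTAMP; // half-open [min, max)
      System.out.println(ts + " -> " + (kept ? "returned by the scan" : "filtered out"));
    }
  }
}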

Modified: hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/master/OOMEHMaster.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/master/OOMEHMaster.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/master/OOMEHMaster.java (original)
+++ hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/master/OOMEHMaster.java Fri May  7 19:26:45 2010
@@ -36,13 +36,13 @@ import org.apache.hadoop.hbase.HServerIn
  */
 public class OOMEHMaster extends HMaster {
   private List<byte []> retainer = new ArrayList<byte[]>();
-  
+
   public OOMEHMaster(HBaseConfiguration conf) throws IOException {
     super(conf);
   }
-  
+
   @Override
-  public HMsg[] regionServerReport(HServerInfo serverInfo, HMsg[] msgs, 
+  public HMsg[] regionServerReport(HServerInfo serverInfo, HMsg[] msgs,
     HRegionInfo[] mostLoadedRegions)
   throws IOException {
     // Retain 1M.

Modified: hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/master/TestMasterTransistions.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/master/TestMasterTransistions.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/master/TestMasterTransistions.java (original)
+++ hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/master/TestMasterTransistions.java Fri May  7 19:26:45 2010
@@ -108,7 +108,7 @@ public class TestMasterTransistions {
       this.copyOfOnlineRegions =
         this.victim.getCopyOfOnlineRegionsSortedBySize().values();
     }
- 
+
     @Override
     public boolean process(HServerInfo serverInfo, HMsg incomingMsg) {
       if (!victim.getServerInfo().equals(serverInfo) ||
@@ -161,7 +161,7 @@ public class TestMasterTransistions {
   * we kill it.  We then wait on all regions to come back online.  If the bug
   * is fixed, this should happen as soon as the processing of the killed server is
    * done.
-   * @see <a href="https://issues.apache.org/jira/browse/HBASE-2482">HBASE-2482</a> 
+   * @see <a href="https://issues.apache.org/jira/browse/HBASE-2482">HBASE-2482</a>
    */
   @Test public void testKillRSWithOpeningRegion2482() throws Exception {
     MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
@@ -209,7 +209,7 @@ public class TestMasterTransistions {
    * @param cluster
    * @param hrs
    * @return Count of regions closed.
-   * @throws IOException 
+   * @throws IOException
    */
   private int closeAlltNonCatalogRegions(final MiniHBaseCluster cluster,
     final MiniHBaseCluster.MiniHBaseClusterRegionServer hrs)
@@ -247,7 +247,7 @@ public class TestMasterTransistions {
     private int closeCount = 0;
     static final int SERVER_DURATION = 3 * 1000;
     static final int CLOSE_DURATION = 1 * 1000;
- 
+
     HBase2428Listener(final MiniHBaseCluster c, final HServerAddress metaAddress,
         final HRegionInfo closingHRI, final int otherServerIndex) {
       this.cluster = c;
@@ -332,7 +332,7 @@ public class TestMasterTransistions {
   /**
    * In 2428, the meta region has just been set offline and then a close comes
    * in.
-   * @see <a href="https://issues.apache.org/jira/browse/HBASE-2428">HBASE-2428</a> 
+   * @see <a href="https://issues.apache.org/jira/browse/HBASE-2428">HBASE-2428</a>
    */
   @Test public void testRegionCloseWhenNoMetaHBase2428() throws Exception {
     MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
@@ -351,7 +351,7 @@ public class TestMasterTransistions {
     // Get a region out on the otherServer.
     final HRegionInfo hri =
       otherServer.getOnlineRegions().iterator().next().getRegionInfo();
- 
+
     // Add our RegionServerOperationsListener
     HBase2428Listener listener = new HBase2428Listener(cluster,
       metaHRS.getHServerInfo().getServerAddress(), hri, otherServerIndex);
@@ -414,10 +414,10 @@ public class TestMasterTransistions {
       // If I get to here and all rows have a Server, then all have been assigned.
       if (rows == countOfRegions) break;
       LOG.info("Found=" + rows);
-      Threads.sleep(1000); 
+      Threads.sleep(1000);
     }
   }
-    
+
   /*
    * @return Count of regions in meta table.
    * @throws IOException

Modified: hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/master/TestMinimumServerCount.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/master/TestMinimumServerCount.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/master/TestMinimumServerCount.java (original)
+++ hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/master/TestMinimumServerCount.java Fri May  7 19:26:45 2010
@@ -76,7 +76,7 @@ public class TestMinimumServerCount exte
     }
     Thread.sleep(10 * 1000);
     assertFalse(admin.isTableAvailable(TABLE_NAME));
-    
+
     // now start another region server
     cluster.startRegionServer();
 

Modified: hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/master/TestRegionManager.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/master/TestRegionManager.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/master/TestRegionManager.java (original)
+++ hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/master/TestRegionManager.java Fri May  7 19:26:45 2010
@@ -44,11 +44,11 @@ public class TestRegionManager extends H
      // 1st .META. region will be something like .META.,,1253625700761
      HRegionInfo metaRegionInfo0 = new HRegionInfo(metaTableDesc, Bytes.toBytes(""), regionInfo0.getRegionName());
      MetaRegion meta0 = new MetaRegion(address, metaRegionInfo0);
-   
+
      byte[] startKey1 = Bytes.toBytes("j");
      byte[] endKey1 = Bytes.toBytes("m");
      HRegionInfo regionInfo1 = new HRegionInfo(tableDesc, startKey1, endKey1);
-     // 2nd .META. region will be something like .META.,_MY_TABLE_,f,1253625700761,1253625700761 
+     // 2nd .META. region will be something like .META.,_MY_TABLE_,f,1253625700761,1253625700761
      HRegionInfo metaRegionInfo1 = new HRegionInfo(metaTableDesc, regionInfo0.getRegionName(), regionInfo1.getRegionName());
      MetaRegion meta1 = new MetaRegion(address, metaRegionInfo1);
 
@@ -60,13 +60,13 @@ public class TestRegionManager extends H
      byte[] startKeyX = Bytes.toBytes("h");
      byte[] endKeyX = Bytes.toBytes("j");
      HRegionInfo regionInfoX = new HRegionInfo(tableDesc, startKeyX, endKeyX);
-   
-   
+
+
      master.getRegionManager().offlineMetaRegion(startKey0);
      master.getRegionManager().putMetaRegionOnline(meta0);
      master.getRegionManager().putMetaRegionOnline(meta1);
      master.getRegionManager().putMetaRegionOnline(meta2);
-   
+
 //    for (byte[] b : master.regionManager.getOnlineMetaRegions().keySet()) {
 //      System.out.println("FROM TEST KEY " + b +"  " +new String(b));
 //    }
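
The comments above spell out how meta region names are built: table name, start key, and a timestamp id joined by commas, so the first .META. region, with its empty start key, renders as ".META.,,1253625700761". An illustrative helper (not the HRegionInfo API) reproducing both shapes:

public class RegionNameSketch {
  static String regionName(String table, String startKey, long id) {
    return table + "," + startKey + "," + id;
  }

  public static void main(String[] args) {
    // First .META. region: empty start key.
    System.out.println(regionName(".META.", "", 1253625700761L));
    // Later .META. regions start at the name of a user region, giving the
    // nested ".META.,_MY_TABLE_,f,1253625700761,1253625700761" shape above.
    String userRegion = regionName("_MY_TABLE_", "f", 1253625700761L);
    System.out.println(regionName(".META.", userRegion, 1253625700761L));
  }
}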

Modified: hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/master/TestRegionServerOperationQueue.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/master/TestRegionServerOperationQueue.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/master/TestRegionServerOperationQueue.java (original)
+++ hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/master/TestRegionServerOperationQueue.java Fri May  7 19:26:45 2010
@@ -45,7 +45,7 @@ public class TestRegionServerOperationQu
   @After
   public void tearDown() throws Exception {
   }
-  
+
   @Test
   public void testNothing() throws Exception {
   }

Modified: hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/metrics/TestMetricsMBeanBase.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/metrics/TestMetricsMBeanBase.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/metrics/TestMetricsMBeanBase.java (original)
+++ hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/metrics/TestMetricsMBeanBase.java Fri May  7 19:26:45 2010
@@ -43,14 +43,14 @@ public class TestMetricsMBeanBase extend
       super(registry, "TestStatistics");
     }
   }
-  
+
   private MetricsRegistry registry;
   private MetricsRecord metricsRecord;
   private TestStatistics stats;
   private MetricsRate metricsRate;
   private MetricsIntValue intValue;
   private MetricsTimeVaryingRate varyRate;
-  
+
   public void setUp() {
     this.registry = new MetricsRegistry();
     this.metricsRate = new MetricsRate("metricsRate", registry, "test");
@@ -61,13 +61,13 @@ public class TestMetricsMBeanBase extend
     this.metricsRecord = MetricsUtil.createRecord(context, "test");
     this.metricsRecord.setTag("TestStatistics", "test");
     //context.registerUpdater(this);
-    
+
   }
-  
+
   public void tearDown() {
-    
+
   }
-  
+
   public void testGetAttribute() throws Exception {
     this.metricsRate.inc(2);
     this.metricsRate.pushMetric(this.metricsRecord);
@@ -76,8 +76,8 @@ public class TestMetricsMBeanBase extend
     this.varyRate.inc(10);
     this.varyRate.inc(50);
     this.varyRate.pushMetric(this.metricsRecord);
-    
-    
+
+
     assertEquals( 2.0, (Float)this.stats.getAttribute("metricsRate"), 0.001 );
     assertEquals( 5, this.stats.getAttribute("intValue") );
     assertEquals( 10L, this.stats.getAttribute("varyRateMinTime") );
@@ -85,17 +85,17 @@ public class TestMetricsMBeanBase extend
     assertEquals( 30L, this.stats.getAttribute("varyRateAvgTime") );
     assertEquals( 2, this.stats.getAttribute("varyRateNumOps") );
   }
-  
+
   public void testGetMBeanInfo() {
     MBeanInfo info = this.stats.getMBeanInfo();
     MBeanAttributeInfo[] attributes = info.getAttributes();
     assertEquals( 6, attributes.length );
-    
-    Map<String,MBeanAttributeInfo> attributeByName = 
+
+    Map<String,MBeanAttributeInfo> attributeByName =
         new HashMap<String,MBeanAttributeInfo>(attributes.length);
     for (MBeanAttributeInfo attr : attributes)
       attributeByName.put(attr.getName(), attr);
-    
+
     assertAttribute( attributeByName.get("metricsRate"),
         "metricsRate", "java.lang.Float", "test");
     assertAttribute( attributeByName.get("intValue"),
@@ -109,10 +109,10 @@ public class TestMetricsMBeanBase extend
     assertAttribute( attributeByName.get("varyRateNumOps"),
         "varyRateNumOps", "java.lang.Integer", "test");
   }
-  
+
   protected void assertAttribute(MBeanAttributeInfo attr, String name,
       String type, String description) {
-    
+
     assertEquals(attr.getName(), name);
     assertEquals(attr.getType(), type);
     assertEquals(attr.getDescription(), description);

Modified: hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/regionserver/DisabledTestRegionServerExit.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/regionserver/DisabledTestRegionServerExit.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/regionserver/DisabledTestRegionServerExit.java (original)
+++ hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/regionserver/DisabledTestRegionServerExit.java Fri May  7 19:26:45 2010
@@ -51,11 +51,11 @@ public class DisabledTestRegionServerExi
   public DisabledTestRegionServerExit() {
     super(2);
     conf.setInt("ipc.client.connect.max.retries", 5); // reduce ipc retries
-    conf.setInt("ipc.client.timeout", 10000);         // and ipc timeout 
+    conf.setInt("ipc.client.timeout", 10000);         // and ipc timeout
     conf.setInt("hbase.client.pause", 10000);         // increase client timeout
     conf.setInt("hbase.client.retries.number", 10);   // increase HBase retries
   }
-  
+
   /**
    * Test abort of region server.
    * @throws IOException
@@ -77,7 +77,7 @@ public class DisabledTestRegionServerExi
     t.start();
     threadDumpingJoin(t);
   }
-  
+
   /**
    * Test abort of region server.
    * Test is flakey up on hudson.  Needs work.
@@ -100,7 +100,7 @@ public class DisabledTestRegionServerExi
     t.start();
     threadDumpingJoin(t);
   }
-  
+
   private byte [] createTableAndAddRow(final String tableName)
   throws IOException {
     HTableDescriptor desc = new HTableDescriptor(tableName);
@@ -119,14 +119,14 @@ public class DisabledTestRegionServerExi
   /*
    * Stop the region server serving the meta region and wait for the meta region
    * to get reassigned. This is always the most problematic case.
-   * 
+   *
    * @param abort set to true if region server should be aborted, if false it
    * is just shut down.
    */
   private void stopOrAbortMetaRegionServer(boolean abort) {
     List<JVMClusterUtil.RegionServerThread> regionThreads =
       cluster.getRegionServerThreads();
-    
+
     int server = -1;
     for (int i = 0; i < regionThreads.size() && server == -1; i++) {
       HRegionServer s = regionThreads.get(i).getRegionServer();
@@ -144,14 +144,14 @@ public class DisabledTestRegionServerExi
     }
     if (abort) {
       this.cluster.abortRegionServer(server);
-      
+
     } else {
       this.cluster.stopRegionServer(server);
     }
     LOG.info(this.cluster.waitOnRegionServer(server) + " has been " +
         (abort ? "aborted" : "shut down"));
   }
-  
+
   /*
    * Run verification in a thread so I can concurrently run a thread-dumper
    * while we're waiting (because in this test sometimes the meta scanner
@@ -173,7 +173,7 @@ public class DisabledTestRegionServerExi
 
           ResultScanner s = t.getScanner(scan);
           s.close();
-          
+
         } catch (IOException e) {
           LOG.fatal("could not re-open meta table because", e);
           fail();
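
The comment in the last hunk explains why verification runs on its own thread: the harness joins it while periodically dumping stacks, so a hang still leaves diagnostics in the log. A rough sketch of that join-with-thread-dumps idea (a hypothetical helper, not HBase's threadDumpingJoin):

    import java.util.Map;

    public class ThreadDumpingJoinDemo {
      // Join the worker in slices; while it stays alive, dump every thread's
      // stack so a wedged verification thread still leaves diagnostics.
      static void joinWithDumps(Thread t, long sliceMillis) throws InterruptedException {
        while (t.isAlive()) {
          t.join(sliceMillis);
          if (t.isAlive()) {
            for (Map.Entry<Thread, StackTraceElement[]> e
                : Thread.getAllStackTraces().entrySet()) {
              System.err.println(e.getKey());
              for (StackTraceElement frame : e.getValue()) {
                System.err.println("  at " + frame);
              }
            }
          }
        }
      }

      public static void main(String[] args) throws InterruptedException {
        Thread worker = new Thread(new Runnable() {
          public void run() {
            try { Thread.sleep(2000); } catch (InterruptedException e) { }
          }
        });
        worker.start();
        joinWithDumps(worker, 500);
      }
    }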

Modified: hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/regionserver/OOMERegionServer.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/regionserver/OOMERegionServer.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/regionserver/OOMERegionServer.java (original)
+++ hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/regionserver/OOMERegionServer.java Fri May  7 19:26:45 2010
@@ -39,7 +39,7 @@ public class OOMERegionServer extends HR
   public OOMERegionServer(HBaseConfiguration conf) throws IOException {
     super(conf);
   }
-  
+
   public void put(byte [] regionName, Put put)
   throws IOException {
     super.put(regionName, put);
@@ -48,7 +48,7 @@ public class OOMERegionServer extends HR
       this.retainer.add(put);
     }
   }
-  
+
   public static void main(String[] args) {
     HRegionServer.doMain(args, OOMERegionServer.class);
   }
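
OOMERegionServer leaks by design: every Put lands in this.retainer and is never released, so the heap eventually fills and the server dies with an OutOfMemoryError. The same trick in miniature (run with a small -Xmx to see it quickly):

    import java.util.ArrayList;
    import java.util.List;

    public class RetainerDemo {
      public static void main(String[] args) {
        List<byte[]> retainer = new ArrayList<byte[]>();
        while (true) {
          retainer.add(new byte[1024 * 1024]); // strong references accumulate until OOME
        }
      }
    }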

Modified: hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java (original)
+++ hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java Fri May  7 19:26:45 2010
@@ -54,17 +54,17 @@ public class TestCompaction extends HBas
   private static final int COMPACTION_THRESHOLD = MAXVERSIONS;
 
   private MiniDFSCluster cluster;
-  
+
   /** constructor */
   public TestCompaction() {
     super();
-    
+
     // Set cache flush size to 1MB
     conf.setInt("hbase.hregion.memstore.flush.size", 1024*1024);
     conf.setInt("hbase.hregion.memstore.block.multiplier", 10);
     this.cluster = null;
   }
-  
+
   @Override
   public void setUp() throws Exception {
     this.cluster = new MiniDFSCluster(conf, 2, true, (String[])null);
@@ -75,10 +75,10 @@ public class TestCompaction extends HBas
     HTableDescriptor htd = createTableDescriptor(getName());
     this.r = createNewHRegion(htd, null, null);
     this.compactionDir = HRegion.getCompactionDir(this.r.getBaseDir());
-    this.regionCompactionDir = new Path(this.compactionDir, 
+    this.regionCompactionDir = new Path(this.compactionDir,
         Integer.toString(this.r.getRegionInfo().getEncodedName()));
   }
-  
+
   @Override
   public void tearDown() throws Exception {
     HLog hlog = r.getLog();
@@ -139,7 +139,7 @@ public class TestCompaction extends HBas
     // Assert == 3 when we ask for versions.
     addContent(new HRegionIncommon(r), Bytes.toString(COLUMN_FAMILY));
 
-    
+
     // FIX!!
 //    Cell[] cellValues =
 //      Cell.createSingleCellArray(r.get(STARTROW, COLUMN_FAMILY_TEXT, -1, 100 /*Too many*/));
@@ -177,7 +177,7 @@ public class TestCompaction extends HBas
     byte [][] famAndQf = {COLUMN_FAMILY, null};
     delete.deleteFamily(famAndQf[0]);
     r.delete(delete, null, true);
-    
+
     // Assert deleted.
 
     result = r.get(new Get(secondRowBytes).addFamily(COLUMN_FAMILY_TEXT).setMaxVersions(100), null );
@@ -258,7 +258,7 @@ public class TestCompaction extends HBas
   }
 
   private void createSmallerStoreFile(final HRegion region) throws IOException {
-    HRegionIncommon loader = new HRegionIncommon(region); 
+    HRegionIncommon loader = new HRegionIncommon(region);
     addContent(loader, Bytes.toString(COLUMN_FAMILY), ("" +
     		"bbb").getBytes(), null);
     loader.flushcache();
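
The constructor above shrinks the memstore flush size so that modest writes roll into store files quickly, giving the compactor enough files to reach COMPACTION_THRESHOLD. The same two settings in isolation, using a plain Hadoop Configuration as a stand-in for the test's conf field:

    import org.apache.hadoop.conf.Configuration;

    public class CompactionConfDemo {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.setInt("hbase.hregion.memstore.flush.size", 1024 * 1024); // flush memstore at 1MB
        conf.setInt("hbase.hregion.memstore.block.multiplier", 10);    // don't block writers early
        System.out.println(conf.getInt("hbase.hregion.memstore.flush.size", -1)); // 1048576
      }
    }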

Modified: hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestDeleteCompare.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestDeleteCompare.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestDeleteCompare.java (original)
+++ hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestDeleteCompare.java Fri May  7 19:26:45 2010
@@ -73,21 +73,21 @@ public class TestDeleteCompare extends T
     int deleteQualifierLen = delete.getQualifierLength();
     int deleteTimestampOffset = deleteQualifierOffset + deleteQualifierLen;
     byte deleteType = deleteBuffer[deleteTimestampOffset +Bytes.SIZEOF_LONG];
-    
+
     List<DeleteCode> actual = new ArrayList<DeleteCode>();
     for(KeyValue mem : memstore){
     actual.add(DeleteCompare.deleteCompare(mem, deleteBuffer, deleteRowOffset,
         deleteRowLen, deleteQualifierOffset, deleteQualifierLen,
         deleteTimestampOffset, deleteType, KeyValue.KEY_COMPARATOR));
-      
+
     }
-    
+
     assertEquals(expected.size(), actual.size());
     for(int i=0; i<expected.size(); i++){
       assertEquals(expected.get(i), actual.get(i));
     }
   }
-  
+
   public void testDeleteCompare_DeleteColumn() {
     //Creating memstore
     Set<KeyValue> memstore = new TreeSet<KeyValue>(KeyValue.COMPARATOR);
@@ -103,7 +103,7 @@ public class TestDeleteCompare extends T
     expected.add(DeleteCode.DELETE);
     expected.add(DeleteCode.DELETE);
     expected.add(DeleteCode.DONE);
-    
+
     KeyValue delete = KeyValueTestUtil.create("row11", "fam", "col1", 2,
         KeyValue.Type.DeleteColumn, "dont-care");
     byte [] deleteBuffer = delete.getBuffer();
@@ -113,22 +113,22 @@ public class TestDeleteCompare extends T
     int deleteQualifierLen = delete.getQualifierLength();
     int deleteTimestampOffset = deleteQualifierOffset + deleteQualifierLen;
     byte deleteType = deleteBuffer[deleteTimestampOffset +Bytes.SIZEOF_LONG];
-    
+
     List<DeleteCode> actual = new ArrayList<DeleteCode>();
     for(KeyValue mem : memstore){
     actual.add(DeleteCompare.deleteCompare(mem, deleteBuffer, deleteRowOffset,
         deleteRowLen, deleteQualifierOffset, deleteQualifierLen,
         deleteTimestampOffset, deleteType, KeyValue.KEY_COMPARATOR));
-      
+
     }
-    
+
     assertEquals(expected.size(), actual.size());
     for(int i=0; i<expected.size(); i++){
       assertEquals(expected.get(i), actual.get(i));
     }
   }
-  
-  
+
+
   public void testDeleteCompare_Delete() {
     //Creating memstore
     Set<KeyValue> memstore = new TreeSet<KeyValue>(KeyValue.COMPARATOR);
@@ -141,7 +141,7 @@ public class TestDeleteCompare extends T
     expected.add(DeleteCode.SKIP);
     expected.add(DeleteCode.DELETE);
     expected.add(DeleteCode.DONE);
-    
+
     KeyValue delete = KeyValueTestUtil.create("row11", "fam", "col1", 2,
         KeyValue.Type.Delete, "dont-care");
     byte [] deleteBuffer = delete.getBuffer();
@@ -151,20 +151,20 @@ public class TestDeleteCompare extends T
     int deleteQualifierLen = delete.getQualifierLength();
     int deleteTimestampOffset = deleteQualifierOffset + deleteQualifierLen;
     byte deleteType = deleteBuffer[deleteTimestampOffset +Bytes.SIZEOF_LONG];
-    
+
     List<DeleteCode> actual = new ArrayList<DeleteCode>();
     for(KeyValue mem : memstore){
     actual.add(DeleteCompare.deleteCompare(mem, deleteBuffer, deleteRowOffset,
         deleteRowLen, deleteQualifierOffset, deleteQualifierLen,
         deleteTimestampOffset, deleteType, KeyValue.KEY_COMPARATOR));
     }
-    
+
     assertEquals(expected.size(), actual.size());
     for(int i=0; i<expected.size(); i++){
       assertEquals(expected.get(i), actual.get(i));
     }
   }
-  
+
   public void testDeleteCompare_Multiple() {
     //Creating memstore
     Set<KeyValue> memstore = new TreeSet<KeyValue>(KeyValue.COMPARATOR);
@@ -194,15 +194,15 @@ public class TestDeleteCompare extends T
     int deleteQualifierLen = delete.getQualifierLength();
     int deleteTimestampOffset = deleteQualifierOffset + deleteQualifierLen;
     byte deleteType = deleteBuffer[deleteTimestampOffset +Bytes.SIZEOF_LONG];
-    
+
     List<DeleteCode> actual = new ArrayList<DeleteCode>();
     for(KeyValue mem : memstore){
     actual.add(DeleteCompare.deleteCompare(mem, deleteBuffer, deleteRowOffset,
         deleteRowLen, deleteQualifierOffset, deleteQualifierLen,
         deleteTimestampOffset, deleteType, KeyValue.KEY_COMPARATOR));
-      
+
     }
-    
+
     assertEquals(expected.size(), actual.size());
     for(int i=0; i<expected.size(); i++){
       assertEquals(expected.get(i), actual.get(i));
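
Every test in this file repeats the same offset arithmetic over the KeyValue's backing array: the timestamp starts where the qualifier ends, and the type byte sits one long past that. A simplified model of just those fields (not the full KeyValue wire format, which also carries lengths and the row up front):

    import java.nio.ByteBuffer;

    public class KeyOffsetsDemo {
      public static void main(String[] args) {
        byte[] row = "row11".getBytes();
        byte[] qualifier = "col1".getBytes();
        long timestamp = 2L;
        byte type = 8; // hypothetical type-code stand-in

        // Simplified layout: [row][qualifier][timestamp:8][type:1]
        ByteBuffer buf = ByteBuffer.allocate(row.length + qualifier.length + 8 + 1);
        buf.put(row).put(qualifier).putLong(timestamp).put(type);
        byte[] raw = buf.array();

        int qualifierOffset = row.length;
        int qualifierLen = qualifier.length;
        int timestampOffset = qualifierOffset + qualifierLen; // as computed in the tests
        byte readType = raw[timestampOffset + 8];             // Bytes.SIZEOF_LONG == 8
        System.out.println(readType == type);                 // true
      }
    }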

Modified: hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestExplicitColumnTracker.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestExplicitColumnTracker.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestExplicitColumnTracker.java (original)
+++ hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestExplicitColumnTracker.java Fri May  7 19:26:45 2010
@@ -40,7 +40,7 @@ implements HConstants {
   private final byte [] col4 = Bytes.toBytes("col4");
   private final byte [] col5 = Bytes.toBytes("col5");
 
-  
+
   public void testGet_SingleVersion(){
     if(PRINT){
       System.out.println("SingleVersion");
@@ -58,9 +58,9 @@ implements HConstants {
     expected.add(MatchCode.INCLUDE);
     expected.add(MatchCode.DONE);
     int maxVersions = 1;
-    
+
     ColumnTracker exp = new ExplicitColumnTracker(columns, maxVersions);
-        
+
     //Create "Scanner"
     List<byte[]> scanner = new ArrayList<byte[]>();
     scanner.add(col1);
@@ -68,15 +68,15 @@ implements HConstants {
     scanner.add(col3);
     scanner.add(col4);
     scanner.add(col5);
-    
+
     //Initialize result
-    List<MatchCode> result = new ArrayList<MatchCode>(); 
-    
+    List<MatchCode> result = new ArrayList<MatchCode>();
+
     //"Match"
     for(byte [] col : scanner){
       result.add(exp.checkColumn(col, 0, col.length));
     }
-    
+
     assertEquals(expected.size(), result.size());
     for(int i=0; i< expected.size(); i++){
       assertEquals(expected.get(i), result.get(i));
@@ -86,18 +86,18 @@ implements HConstants {
       }
     }
   }
-  
+
   public void testGet_MultiVersion(){
     if(PRINT){
       System.out.println("\nMultiVersion");
     }
-    
+
     //Create tracker
     TreeSet<byte[]> columns = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
     //Looking for every other
     columns.add(col2);
     columns.add(col4);
-    
+
     List<MatchCode> expected = new ArrayList<MatchCode>();
     expected.add(MatchCode.SKIP);
     expected.add(MatchCode.SKIP);
@@ -119,9 +119,9 @@ implements HConstants {
     expected.add(MatchCode.DONE);
     expected.add(MatchCode.DONE);
     int maxVersions = 2;
-    
+
     ColumnTracker exp = new ExplicitColumnTracker(columns, maxVersions);
-        
+
     //Create "Scanner"
     List<byte[]> scanner = new ArrayList<byte[]>();
     scanner.add(col1);
@@ -139,15 +139,15 @@ implements HConstants {
     scanner.add(col5);
     scanner.add(col5);
     scanner.add(col5);
-    
+
     //Initialize result
-    List<MatchCode> result = new ArrayList<MatchCode>(); 
-    
+    List<MatchCode> result = new ArrayList<MatchCode>();
+
     //"Match"
     for(byte [] col : scanner){
       result.add(exp.checkColumn(col, 0, col.length));
     }
-    
+
     assertEquals(expected.size(), result.size());
     for(int i=0; i< expected.size(); i++){
       assertEquals(expected.get(i), result.get(i));
@@ -182,5 +182,5 @@ implements HConstants {
     }
   }
 
-  
+
 }
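
Both tests drive the tracker the same way: feed it a sorted stream of qualifiers and compare the emitted MatchCodes against a hand-built expectation. A toy tracker that reproduces the SKIP/INCLUDE/DONE decisions for the single-version case (a simplified model, not HBase's ExplicitColumnTracker):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;
    import java.util.TreeMap;

    public class TrackerSketch {
      enum MatchCode { INCLUDE, SKIP, DONE }

      // Wanted column -> versions still to include.
      private final TreeMap<String, Integer> remaining = new TreeMap<String, Integer>();

      TrackerSketch(List<String> columns, int maxVersions) {
        for (String c : columns) remaining.put(c, maxVersions);
      }

      MatchCode checkColumn(String col) {
        if (remaining.isEmpty()) return MatchCode.DONE;   // nothing left to match
        Integer left = remaining.get(col);
        if (left == null) {
          // Unwanted column: DONE once we are past the last wanted one.
          return col.compareTo(remaining.lastKey()) > 0 ? MatchCode.DONE : MatchCode.SKIP;
        }
        if (left == 1) remaining.remove(col); else remaining.put(col, left - 1);
        return MatchCode.INCLUDE;
      }

      public static void main(String[] args) {
        TrackerSketch exp = new TrackerSketch(Arrays.asList("col2", "col4"), 1);
        List<MatchCode> result = new ArrayList<MatchCode>();
        for (String col : Arrays.asList("col1", "col2", "col3", "col4", "col5")) {
          result.add(exp.checkColumn(col));
        }
        System.out.println(result); // [SKIP, INCLUDE, SKIP, INCLUDE, DONE]
      }
    }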

Modified: hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java (original)
+++ hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java Fri May  7 19:26:45 2010
@@ -42,12 +42,12 @@ import org.apache.hadoop.hdfs.MiniDFSClu
 
 /**
  * {@link TestGet} is a medley of tests of get all done up as a single test.
- * This class 
+ * This class
  */
 public class TestGetClosestAtOrBefore extends HBaseTestCase implements HConstants {
   static final Log LOG = LogFactory.getLog(TestGetClosestAtOrBefore.class);
   private MiniDFSCluster miniHdfs;
-  
+
   private static final byte [] T00 = Bytes.toBytes("000");
   private static final byte [] T10 = Bytes.toBytes("010");
   private static final byte [] T11 = Bytes.toBytes("011");
@@ -184,36 +184,36 @@ public class TestGetClosestAtOrBefore ex
     try {
       HTableDescriptor htd = createTableDescriptor(getName());
       region = createNewHRegion(htd, null, null);
-      
+
       Put p = new Put(T00);
       p.add(c0, c0, T00);
       region.put(p);
-      
+
       p = new Put(T10);
       p.add(c0, c0, T10);
       region.put(p);
-      
+
       p = new Put(T20);
       p.add(c0, c0, T20);
       region.put(p);
-      
+
       Result r = region.getClosestRowBefore(T20, c0);
       assertTrue(Bytes.equals(T20, r.getRow()));
-      
+
       Delete d = new Delete(T20);
       d.deleteColumn(c0, c0);
       region.delete(d, null, false);
-      
+
       r = region.getClosestRowBefore(T20, c0);
       assertTrue(Bytes.equals(T10, r.getRow()));
-      
+
       p = new Put(T30);
       p.add(c0, c0, T30);
       region.put(p);
-      
+
       r = region.getClosestRowBefore(T30, c0);
       assertTrue(Bytes.equals(T30, r.getRow()));
-      
+
       d = new Delete(T30);
       d.deleteColumn(c0, c0);
       region.delete(d, null, false);
@@ -230,7 +230,7 @@ public class TestGetClosestAtOrBefore ex
       assertTrue(Bytes.equals(T10, r.getRow()));
       r = region.getClosestRowBefore(T31, c0);
       assertTrue(Bytes.equals(T10, r.getRow()));
-      
+
       // Put into a different column family.  Should make it so I still get t10
       p = new Put(T20);
       p.add(c1, c1, T20);
@@ -240,14 +240,14 @@ public class TestGetClosestAtOrBefore ex
       assertTrue(Bytes.equals(T10, r.getRow()));
       r = region.getClosestRowBefore(T31, c0);
       assertTrue(Bytes.equals(T10, r.getRow()));
-      
+
       region.flushcache();
-      
+
       r = region.getClosestRowBefore(T30, c0);
       assertTrue(Bytes.equals(T10, r.getRow()));
       r = region.getClosestRowBefore(T31, c0);
       assertTrue(Bytes.equals(T10, r.getRow()));
-      
+
      // Now try combo of memcache and mapfiles.  Delete the t20 COLUMNS[1]
       // in memory; make sure we get back t10 again.
       d = new Delete(T20);
@@ -255,14 +255,14 @@ public class TestGetClosestAtOrBefore ex
       region.delete(d, null, false);
       r = region.getClosestRowBefore(T30, c0);
       assertTrue(Bytes.equals(T10, r.getRow()));
-      
+
       // Ask for a value off the end of the file.  Should return t10.
       r = region.getClosestRowBefore(T31, c0);
       assertTrue(Bytes.equals(T10, r.getRow()));
       region.flushcache();
       r = region.getClosestRowBefore(T31, c0);
       assertTrue(Bytes.equals(T10, r.getRow()));
-      
+
       // Ok.  Let the candidate come out of hfile but have delete of
       // the candidate be in memory.
       p = new Put(T11);
@@ -291,15 +291,15 @@ public class TestGetClosestAtOrBefore ex
     try {
       HTableDescriptor htd = createTableDescriptor(getName());
       region = createNewHRegion(htd, null, null);
-      
+
       Put p = new Put(T10);
       p.add(c0, c0, T10);
       region.put(p);
-      
+
       p = new Put(T30);
       p.add(c0, c0, T30);
       region.put(p);
-      
+
       p = new Put(T40);
       p.add(c0, c0, T40);
       region.put(p);
@@ -317,11 +317,11 @@ public class TestGetClosestAtOrBefore ex
       p = new Put(T20);
       p.add(c0, c0, T20);
       region.put(p);
-      
+
       // try finding "035"
       r = region.getClosestRowBefore(T35, c0);
       assertTrue(Bytes.equals(T30, r.getRow()));
-      
+
       region.flushcache();
 
       // try finding "035"
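
The assertions in this file reduce to one ordering rule: getClosestRowBefore returns the greatest existing row at or before the probe, falling back past rows whose cells in the asked-for family were deleted. The ordering half maps directly onto a sorted map (illustration only; none of the flush or store-file machinery):

    import java.util.TreeMap;

    public class ClosestBeforeDemo {
      public static void main(String[] args) {
        TreeMap<String, String> rows = new TreeMap<String, String>();
        rows.put("000", "v");
        rows.put("010", "v");
        rows.put("020", "v");

        System.out.println(rows.floorKey("020")); // 020 -- an exact match counts
        rows.remove("020");                       // like the Delete of T20 above
        System.out.println(rows.floorKey("020")); // 010 -- falls back to the earlier row
        System.out.println(rows.floorKey("031")); // 010 -- probing past the end also lands on 010
      }
    }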

Modified: hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetDeleteTracker.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetDeleteTracker.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetDeleteTracker.java (original)
+++ hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetDeleteTracker.java Fri May  7 19:26:45 2010
@@ -31,37 +31,37 @@ import org.apache.hadoop.hbase.util.Byte
 
 
 public class TestGetDeleteTracker extends HBaseTestCase implements HConstants {
-  
+
   private static final boolean PRINT = true;
-  
+
   private byte [] col1 = null;
   private byte [] col2 = null;
-  
+
   private int col1Len = 0;
   private int col2Len = 0;
 
   private byte [] empty = null;
-  
+
   private long ts1 = 0L;
   private long ts2 = 0L;
   private long ts3 = 0L;
-  
-  
+
+
   private Delete del10 = null;
   private Delete del11 = null;
   private Delete delQf10 = null;
   private Delete delQf11 = null;
   private Delete delFam10 = null;
-  
+
   private Delete del20 = null;
   private Delete del21 = null;
   private Delete delQf20 = null;
   private Delete delQf21 = null;
   private Delete delFam20 = null;
-  
-  
+
+
   private Delete del30 = null;
-  
+
   GetDeleteTracker dt = null;
   private byte del = KeyValue.Type.Delete.getCode();
   private byte delCol = KeyValue.Type.DeleteColumn.getCode();
@@ -74,7 +74,7 @@ public class TestGetDeleteTracker extend
     col2 = "col2".getBytes();
     col1Len = col1.length;
     col2Len = col2.length;
-    
+
     empty = new byte[0];
 
     //ts1
@@ -84,7 +84,7 @@ public class TestGetDeleteTracker extend
     delQf10 = new Delete(col1, 0, col1Len, delCol, ts1);
     delQf11 = new Delete(col2, 0, col2Len, delCol, ts1);
     delFam10 = new Delete(empty, 0, 0, delFam, ts1);
-    
+
     //ts2
     ts2 = System.nanoTime();
     del20 = new Delete(col1, 0, col1Len, del, ts2);
@@ -92,109 +92,109 @@ public class TestGetDeleteTracker extend
     delQf20 = new Delete(col1, 0, col1Len, delCol, ts2);
     delQf21 = new Delete(col2, 0, col2Len, delCol, ts2);
     delFam20 = new Delete(empty, 0, 0, delFam, ts1);
-    
+
     //ts3
     ts3 = System.nanoTime();
     del30 = new Delete(col1, 0, col1Len, del, ts3);
   }
-  
+
   public void testUpdate_CompareDeletes() {
     GetDeleteTracker.DeleteCompare res = null;
-    
-    
+
+
     //Testing Delete and Delete
     res = dt.compareDeletes(del10, del10);
     assertEquals(DeleteTracker.DeleteCompare.INCLUDE_OLD_NEXT_BOTH, res);
-    
-    //Testing Delete qf1 and Delete qf2 and <==> 
+
+    //Testing Delete qf1 and Delete qf2 and <==>
     res = dt.compareDeletes(del10, del11);
     assertEquals(DeleteTracker.DeleteCompare.INCLUDE_OLD_NEXT_OLD, res);
     res = dt.compareDeletes(del11, del10);
     assertEquals(DeleteTracker.DeleteCompare.INCLUDE_NEW_NEXT_NEW, res);
-        
-    //Testing Delete ts1 and Delete ts2 and <==> 
+
+    //Testing Delete ts1 and Delete ts2 and <==>
     res = dt.compareDeletes(del10, del20);
     assertEquals(DeleteTracker.DeleteCompare.INCLUDE_NEW_NEXT_NEW, res);
     res = dt.compareDeletes(del20, del10);
     assertEquals(DeleteTracker.DeleteCompare.INCLUDE_OLD_NEXT_OLD, res);
-    
-    
-    
+
+
+
     //Testing DeleteColumn and DeleteColumn
     res = dt.compareDeletes(delQf10, delQf10);
     assertEquals(DeleteTracker.DeleteCompare.INCLUDE_OLD_NEXT_BOTH, res);
-    
-    //Testing DeleteColumn qf1 and DeleteColumn qf2 and <==> 
+
+    //Testing DeleteColumn qf1 and DeleteColumn qf2 and <==>
     res = dt.compareDeletes(delQf10, delQf11);
     assertEquals(DeleteTracker.DeleteCompare.INCLUDE_OLD_NEXT_OLD, res);
     res = dt.compareDeletes(delQf11, delQf10);
     assertEquals(DeleteTracker.DeleteCompare.INCLUDE_NEW_NEXT_NEW, res);
-    
-    //Testing DeleteColumn ts1 and DeleteColumn ts2 and <==> 
+
+    //Testing DeleteColumn ts1 and DeleteColumn ts2 and <==>
     res = dt.compareDeletes(delQf10, delQf20);
     assertEquals(DeleteTracker.DeleteCompare.INCLUDE_NEW_NEXT_BOTH, res);
     res = dt.compareDeletes(delQf20, delQf10);
     assertEquals(DeleteTracker.DeleteCompare.INCLUDE_OLD_NEXT_BOTH, res);
-    
-    
-    
-    //Testing Delete and DeleteColumn and <==> 
+
+
+
+    //Testing Delete and DeleteColumn and <==>
     res = dt.compareDeletes(del10, delQf10);
     assertEquals(DeleteTracker.DeleteCompare.NEXT_OLD, res);
     res = dt.compareDeletes(delQf10, del10);
     assertEquals(DeleteTracker.DeleteCompare.NEXT_NEW, res);
 
-    //Testing Delete qf1 and DeleteColumn qf2 and <==> 
+    //Testing Delete qf1 and DeleteColumn qf2 and <==>
     res = dt.compareDeletes(del10, delQf11);
     assertEquals(DeleteTracker.DeleteCompare.INCLUDE_OLD_NEXT_OLD, res);
     res = dt.compareDeletes(delQf11, del10);
     assertEquals(DeleteTracker.DeleteCompare.INCLUDE_NEW_NEXT_NEW, res);
-    
-    //Testing Delete qf2 and DeleteColumn qf1 and <==> 
+
+    //Testing Delete qf2 and DeleteColumn qf1 and <==>
     res = dt.compareDeletes(del11, delQf10);
     assertEquals(DeleteTracker.DeleteCompare.INCLUDE_NEW_NEXT_NEW, res);
     res = dt.compareDeletes(delQf10, del11);
     assertEquals(DeleteTracker.DeleteCompare.INCLUDE_OLD_NEXT_OLD, res);
-    
-    //Testing Delete ts2 and DeleteColumn ts1 and <==> 
+
+    //Testing Delete ts2 and DeleteColumn ts1 and <==>
     res = dt.compareDeletes(del20, delQf10);
     assertEquals(DeleteTracker.DeleteCompare.INCLUDE_OLD_NEXT_OLD, res);
     res = dt.compareDeletes(delQf10, del20);
     assertEquals(DeleteTracker.DeleteCompare.INCLUDE_NEW_NEXT_NEW, res);
- 
-    //Testing Delete ts1 and DeleteColumn ts2 and <==> 
+
+    //Testing Delete ts1 and DeleteColumn ts2 and <==>
     res = dt.compareDeletes(del10, delQf20);
     assertEquals(DeleteTracker.DeleteCompare.NEXT_OLD, res);
     res = dt.compareDeletes(delQf20, del10);
     assertEquals(DeleteTracker.DeleteCompare.NEXT_NEW, res);
-    
+
   }
-  
+
   public void testUpdate(){
     //Building lists
     List<Delete> dels1 = new ArrayList<Delete>();
     dels1.add(delQf10);
     dels1.add(del21);
-    
+
     List<Delete> dels2 = new ArrayList<Delete>();
     dels2.add(delFam10);
     dels2.add(del30);
     dels2.add(delQf20);
-    
+
     List<Delete> res = new ArrayList<Delete>();
     res.add(del30);
     res.add(delQf20);
     res.add(del21);
-    
+
     //Adding entries
     for(Delete del : dels1){
       dt.add(del.buffer, del.qualifierOffset, del.qualifierLength,
           del.timestamp, del.type);
     }
-    
+
     //update()
     dt.update();
-    
+
     //Check deleteList
     List<Delete> delList = dt.deletes;
     assertEquals(dels1.size(), delList.size());
@@ -206,7 +206,7 @@ public class TestGetDeleteTracker extend
       assertEquals(dels1.get(i).timestamp, delList.get(i).timestamp);
       assertEquals(dels1.get(i).type, delList.get(i).type);
     }
-    
+
     //Add more entries
     for(Delete del : dels2){
       dt.add(del.buffer, del.qualifierOffset, del.qualifierLength,
@@ -214,7 +214,7 @@ public class TestGetDeleteTracker extend
     }
     //Update()
     dt.update();
-    
+
     //Check deleteList
     delList = dt.deletes;
 
@@ -226,14 +226,14 @@ public class TestGetDeleteTracker extend
       assertEquals(res.get(i).timestamp, delList.get(i).timestamp);
       assertEquals(res.get(i).type, delList.get(i).type);
       if(PRINT){
-        System.out.println("Qf " +new String(delList.get(i).buffer) + 
-            ", timestamp, " +delList.get(i).timestamp+ 
+        System.out.println("Qf " +new String(delList.get(i).buffer) +
+            ", timestamp, " +delList.get(i).timestamp+
             ", type " +KeyValue.Type.codeToType(delList.get(i).type));
       }
     }
-    
+
   }
-  
+
   /**
   * Test if a KeyValue is in the lists of deletes already. Cases that need to
    * be tested are:
@@ -247,7 +247,7 @@ public class TestGetDeleteTracker extend
     List<Delete> dels = new ArrayList<Delete>();
     dels.add(delQf10);
     dels.add(del21);
-    
+
     //Adding entries
     for(Delete del : dels){
       dt.add(del.buffer, del.qualifierOffset, del.qualifierLength,
@@ -262,50 +262,50 @@ public class TestGetDeleteTracker extend
     //Building lists
     List<Delete> dels = new ArrayList<Delete>();
     dels.add(del21);
-    
+
     //Adding entries
     for(Delete del : dels){
-      dt.add(del.buffer, del.qualifierOffset, del.qualifierLength, 
+      dt.add(del.buffer, del.qualifierOffset, del.qualifierLength,
           del.timestamp, del.type);
     }
-    
+
     //update()
     dt.update();
-    
+
     assertEquals(true, dt.isDeleted(col2, 0, col2Len, ts2));
   }
-  
+
   public void testIsDeleted_DeleteColumn(){
     //Building lists
     List<Delete> dels = new ArrayList<Delete>();
     dels.add(delQf21);
-    
+
     //Adding entries
     for(Delete del : dels){
       dt.add(del.buffer, del.qualifierOffset, del.qualifierLength,
           del.timestamp, del.type);
     }
-    
+
     //update()
     dt.update();
-    
+
     assertEquals(true, dt.isDeleted(col2, 0, col2Len, ts1));
   }
-  
+
   public void testIsDeleted_DeleteFamily(){
     //Building lists
     List<Delete> dels = new ArrayList<Delete>();
     dels.add(delFam20);
-    
+
     //Adding entries
     for(Delete del : dels){
       dt.add(del.buffer, del.qualifierOffset, del.qualifierLength,
           del.timestamp, del.type);
     }
-    
+
     //update()
     dt.update();
-    
+
     assertEquals(true, dt.isDeleted(col2, 0, col2Len, ts1));
   }
 
@@ -323,5 +323,5 @@ public class TestGetDeleteTracker extend
     dt.update();
     assertEquals(false, dt.isDeleted(col2, 0, col2Len, 7000000));
   }
-  
+
 }
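
The isDeleted_* cases pin down three masking rules: a Delete hides exactly one (qualifier, timestamp) cell, a DeleteColumn hides a qualifier at its timestamp and everything older, and a DeleteFamily hides every qualifier at its timestamp and older. A sketch of just those rules, stripped of the tracker's buffer handling (not HBase's GetDeleteTracker):

    public class DeleteRulesDemo {
      enum Type { DELETE, DELETE_COLUMN, DELETE_FAMILY }

      static boolean isDeleted(Type type, String delQualifier, long delTs,
          String cellQualifier, long cellTs) {
        switch (type) {
          case DELETE:        return delQualifier.equals(cellQualifier) && cellTs == delTs;
          case DELETE_COLUMN: return delQualifier.equals(cellQualifier) && cellTs <= delTs;
          case DELETE_FAMILY: return cellTs <= delTs; // qualifier is irrelevant
          default:            return false;
        }
      }

      public static void main(String[] args) {
        System.out.println(isDeleted(Type.DELETE, "col2", 2L, "col2", 2L));        // true
        System.out.println(isDeleted(Type.DELETE, "col2", 2L, "col2", 1L));        // false -- older cell survives a point Delete
        System.out.println(isDeleted(Type.DELETE_COLUMN, "col2", 2L, "col2", 1L)); // true
        System.out.println(isDeleted(Type.DELETE_FAMILY, "", 1L, "col2", 1L));     // true
      }
    }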


