hbase-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From li...@apache.org
Subject svn commit: r1417594 - in /hbase/branches/0.89-fb/src: main/java/org/apache/hadoop/hbase/regionserver/Store.java test/java/org/apache/hadoop/hbase/regionserver/TestStore.java
Date Wed, 05 Dec 2012 19:21:04 GMT
Author: liyin
Date: Wed Dec  5 19:21:03 2012
New Revision: 1417594

URL: http://svn.apache.org/viewvc?rev=1417594&view=rev
Log:
[HBASE-7267] Only create the dummy hfile for the compaction if necessary.

Author: liyintang

Summary:
In HBASE-6059, it introduced a new behavior that the compaction would create the HFileWriter
no matter whether there is any key/value as the output or not. This new behavior actually
conflicts with HBASE-5199 (Delete out-of-TTL store files before compaction selection), so that
compacting the expired hfiles would generate one more expired hfile.
Actually we only need to create the dummy hfile IFF the maxSequenceID among the compaction
candidates is equal to the maxSequenceID among all the on-disk hfiles.

Also I reverted some of the unit test changes for TestStore from https://reviews.facebook.net/D5175
based on this new behavior.

Test Plan: running all the unit tests

Reviewers: kannan, kranganathan, adela

Reviewed By: kannan

CC: hbase-eng@

Differential Revision: https://phabricator.fb.com/D648192

Modified:
    hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
    hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java?rev=1417594&r1=1417593&r2=1417594&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java Wed
Dec  5 19:21:03 2012
@@ -1139,13 +1139,13 @@ public class Store extends SchemaConfigu
    *
    * @param filesToCompact which files to compact
    * @param majorCompaction true to major compact (prune all deletes, max versions, etc)
-   * @param maxId Readers maximum sequence id.
+   * @param maxCompactingSequcenceId The maximum sequence id among the filesToCompact
    * @return Product of compaction or null if all cells expired or deleted and
    * nothing made it through the compaction.
    * @throws IOException
    */
   StoreFile.Writer compactStores(final Collection<StoreFile> filesToCompact,
-                               final boolean majorCompaction, final long maxId)
+                               final boolean majorCompaction, final long maxCompactingSequcenceId)
       throws IOException {
     // calculate maximum key count (for blooms), and minFlushTime after compaction
     long maxKeyCount = 0;
@@ -1199,15 +1199,18 @@ public class Store extends SchemaConfigu
         // we have to use a do/while loop.
         ArrayList<KeyValue> kvs = new ArrayList<KeyValue>();
         boolean hasMore;
+        // Create the writer whether or not there are output KVs, 
+        // iff the maxSequenceID among the compaction candidates is 
+        // equal to the maxSequenceID among all the on-disk hfiles. [HBASE-7267]
+        if (maxCompactingSequcenceId == this.getMaxSequenceId(true)) {
+          writer = createWriterInTmp(maxKeyCount, compression, true);
+        }
         do {
           hasMore = scanner.next(kvs, 1);
-          // Create the writer even if no kv(Empty store file is also ok),
-          // because we need record the max seq id for the store file, see
-          // HBASE-6059
-          if (writer == null) {
-            writer = createWriterInTmp(maxKeyCount, compression, true);
-          }
           if (!kvs.isEmpty()) {
+            if (writer == null) {
+              writer = createWriterInTmp(maxKeyCount, compression, true);
+            }
             // output to writer:
             for (KeyValue kv : kvs) {
               if (kv.getMemstoreTS() <= smallestReadPoint) {
@@ -1253,7 +1256,7 @@ public class Store extends SchemaConfigu
         if (minFlushTime == Long.MAX_VALUE) {
           minFlushTime = HConstants.NO_MIN_FLUSH_TIME;
         }
-        writer.appendMetadata(minFlushTime, maxId, majorCompaction);
+        writer.appendMetadata(minFlushTime, maxCompactingSequcenceId, majorCompaction);
         writer.close();
       }
     }

Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java?rev=1417594&r1=1417593&r2=1417594&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java
(original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java
Wed Dec  5 19:21:03 2012
@@ -163,25 +163,24 @@ public class TestStore extends TestCase 
     // Verify the total number of store files
     assertEquals(storeFileNum, this.store.getStorefiles().size());
 
+    // Advanced the max sequence ID by flushing one more file
+    this.store.add(new KeyValue(row, family, qf3, System.currentTimeMillis(), (byte[]) null));
+    flush(storeFileNum + 1);
+    
     // Each compaction request will find one expired store file and delete it
     // by the compaction.
     for (int i = 1; i <= storeFileNum; i++) {
       // verify the expired store file.
       CompactionRequest cr = this.store.requestCompaction();
-      // the first is expired normally.
-      // If not the first compaction, there is another empty store file,
-      assertEquals(Math.min(i, 2), cr.getFiles().size());
-      for (int j = 0; i < cr.getFiles().size(); j++) {
-        assertTrue(cr.getFiles().get(j).getReader().getMaxTimestamp() < (System
-            .currentTimeMillis() - this.store.ttl));
-      }
+      
+      assertEquals(1, cr.getFiles().size());
+      assertTrue(cr.getFiles().get(0).getReader().getMaxTimestamp() <
+          (System.currentTimeMillis() - store.ttl));
+      
       // Verify that the expired store file is compacted to an empty store file.
       this.store.compact(cr);
-
       // It is an empty store file.
-      assertEquals(0, this.store.getStorefiles().get(0).getReader()
-          .getEntries());
-
+      assertEquals(storeFileNum + 1 - i, this.store.getStorefiles().size());
       // Let the next store file expired.
       Thread.sleep(sleepTime);
     }



Mime
View raw message