hbase-commits mailing list archives

From st...@apache.org
Subject svn commit: r678650 - in /hadoop/hbase/trunk: ./ src/java/org/apache/hadoop/hbase/regionserver/ src/test/org/apache/hadoop/hbase/regionserver/ src/test/org/apache/hadoop/hbase/util/
Date Tue, 22 Jul 2008 05:49:36 GMT
Author: stack
Date: Mon Jul 21 22:49:36 2008
New Revision: 678650

URL: http://svn.apache.org/viewvc?rev=678650&view=rev
Log:
HBASE-745 scaling of one regionserver, improving memory and cpu usage
HBASE-759 TestMetaUtils failing on hudson


Modified:
    hadoop/hbase/trunk/CHANGES.txt
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStore.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestCompaction.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/util/TestMetaUtils.java

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=678650&r1=678649&r2=678650&view=diff
==============================================================================
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Mon Jul 21 22:49:36 2008
@@ -198,6 +198,14 @@
    HBASE-679   Regionserver addresses are still not right in the new tables page
    HBASE-758   Throwing IOE read-only when should be throwing NSRE
    HBASE-743   bin/hbase migrate upgrade fails when redo logs exists
+   HBASE-754   The JRuby shell documentation is wrong in "get" and "put"
+               (Jean-Daniel Cryans via Stack)
+   HBASE-756   In HBase shell, the put command doesn't process the timestamp
+               (Jean-Daniel Cryans via Stack)
+   HBASE-757   REST mangles table names (Sishen via Stack)
+   HBASE-706   On OOME, regionserver sticks around and doesn't go down with cluster
+               (Jean-Daniel Cryans via Stack)
+   HBASE-759   TestMetaUtils failing on hudson
    
   IMPROVEMENTS
    HBASE-559   MR example job to count table rows
@@ -294,18 +302,12 @@
                (Jean-Daniel Cryans via Stack)
    HBASE-731   Add a meta refresh tag to the Web ui for master and region server
                (Jean-Daniel Cryans via Stack)
-   HBASE-706   On OOME, regionserver sticks around and doesn't go down with cluster
-               (Jean-Daniel Cryans via Stack)
-   HBASE-735   hbase shell doesn't trap CTRL-C signal
-               (Jean-Daniel Cryans via Stack)
+   HBASE-735   hbase shell doesn't trap CTRL-C signal (Jean-Daniel Cryans via Stack)
    HBASE-730   On startup, rinse STARTCODE and SERVER from .META.
                (Jean-Daniel Cryans via Stack)
    HBASE-738   overview.html in need of updating (Izaak Rubin via Stack)
-   HBASE-754   The JRuby shell documentation is wrong in "get" and "put"
-               (Jean-Daniel Cryans via Stack)
-   HBASE-756   In HBase shell, the put command doesn't process the timestamp
-               (Jean-Daniel Cryans via Stack)
-   HBASE-757   REST mangles table names (Sishen via Stack)
+   HBASE-745   scaling of one regionserver, improving memory and cpu usage (partial)
+               (LN via Stack)
 
   NEW FEATURES
    HBASE-47    Option to set TTL for columns in hbase

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java?rev=678650&r1=678649&r2=678650&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java Mon Jul 21 22:49:36 2008
@@ -153,7 +153,6 @@
       }
       t = meta;
     }
-    LOG.info("Updating " + Bytes.toString(t.getTableName()) + " with region split info");
 
     // Mark old region as offline and split in META.
     // NOTE: there is no need for retry logic here. HTable does it for us.
@@ -177,9 +176,6 @@
     }
         
     // Now tell the master about the new regions
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Reporting region split to master");
-    }
     server.reportSplit(oldRegionInfo, newRegions[0].getRegionInfo(),
       newRegions[1].getRegionInfo());
     LOG.info("region split, META updated, and report to master all" +

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java?rev=678650&r1=678649&r2=678650&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java Mon Jul 21 22:49:36 2008
@@ -1008,18 +1008,19 @@
    */
   private boolean internalFlushcache() throws IOException {
     final long startTime = System.currentTimeMillis();
-    
     // Clear flush flag.
     this.flushRequested = false;
-    
     // Record latest flush time
     this.lastFlushTime = startTime;
-  
+    // If nothing to flush, return and avoid logging start/stop flush.
+    if (this.memcacheSize.get() <= 0) {
+      return false;
+    }
     if (LOG.isDebugEnabled()) {
       LOG.debug("Started memcache flush for region " + this +
         ". Current region memcache size " +
           StringUtils.humanReadableInt(this.memcacheSize.get()));
-      }
+    }
 
     // Stop updates while we snapshot the memcache of all stores. We only have
    // to do this for a moment.  It's quick.  The subsequent sequence id that
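
To see the guard in isolation, here is a standalone sketch of the shape this hunk gives internalFlushcache(). The class and field names below are illustrative stand-ins, not HBase's own; only the guard itself mirrors the diff:

import java.util.concurrent.atomic.AtomicLong;

/**
 * Illustrative sketch of the empty-memcache guard added above.
 */
public class FlushGuardSketch {
  private final AtomicLong memcacheSize = new AtomicLong(0);
  private volatile boolean flushRequested = true;
  private volatile long lastFlushTime;

  boolean internalFlushcache() {
    final long startTime = System.currentTimeMillis();
    this.flushRequested = false;    // Clear flush flag.
    this.lastFlushTime = startTime; // Record latest flush time.
    // Return before any start/stop flush logging or snapshot work
    // when the memcache holds no edits.
    if (this.memcacheSize.get() <= 0) {
      return false;
    }
    System.out.println("flushing " + this.memcacheSize.getAndSet(0) + " bytes");
    return true;
  }

  public static void main(String[] args) {
    FlushGuardSketch r = new FlushGuardSketch();
    System.out.println(r.internalFlushcache()); // false: nothing to flush
    r.memcacheSize.set(1024);
    System.out.println(r.internalFlushcache()); // true: 1024 bytes flushed
  }
}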

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStore.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStore.java?rev=678650&r1=678649&r2=678650&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStore.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStore.java Mon Jul 21 22:49:36 2008
@@ -23,6 +23,7 @@
 import java.io.IOException;
 import java.io.UnsupportedEncodingException;
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -699,6 +700,21 @@
   // Compaction
   //////////////////////////////////////////////////////////////////////////////
 
+  /*
+   * @param files
+   * @return True if any of the files in <code>files</code> are References.
+   */
+  private boolean hasReferences(Collection<HStoreFile> files) {
+    if (files != null && files.size() > 0) {
+      for (HStoreFile hsf: files) {
+        if (hsf.isReference()) {
+          return true;
+        }
+      }
+    }
+    return false;
+  }
+  
   /**
    * Compact the back-HStores.  This method may take some time, so the calling 
    * thread must be able to block for long periods.
@@ -742,6 +758,40 @@
         LOG.warn("Mkdir on " + compactionDir.toString() + " failed");
         return checkSplit();
       }
+
+      // HBASE-745: gather all store file sizes for incremental compaction selection.
+      int countOfFiles = filesToCompact.size();
+      long totalSize = 0;
+      long[] fileSizes = new long[countOfFiles];
+      long skipped = 0;
+      int point = 0;
+      for (int i = 0; i < countOfFiles; i++) {
+        HStoreFile file = filesToCompact.get(i);
+        Path path = file.getMapFilePath();
+        long len = 0;
+        for (FileStatus fstatus : fs.listStatus(path)) {
+          len += fstatus.getLen();
+        }
+        fileSizes[i] = len;
+        totalSize += len;
+      }
+      if (!force && !hasReferences(filesToCompact)) {
+        // Here we select files for incremental compaction.
+        // The rule is: if the largest (oldest) file is more than twice the
+        // size of the next one, skip it and move on to the next, until we
+        // are within the compactionThreshold limit.
+        for (point = 0; point < compactionThreshold - 1; point++) {
+          if (fileSizes[point] < fileSizes[point + 1] * 2) {
+            break;
+          }
+          skipped += fileSizes[point];
+        }
+        filesToCompact = new ArrayList<HStoreFile>(filesToCompact.subList(point,
+          countOfFiles));
+        LOG.info("Compaction size " + totalSize + ", skipped " + point +
+          ", " + skipped);
+      }
+
       /*
        * We create a new list of MapFile.Reader objects so we don't screw up
        * the caching associated with the currently-loaded ones. Our iteration-
@@ -794,10 +844,9 @@
 
       // Move the compaction into place.
       completeCompaction(filesToCompact, compactedOutputFile);
-
       if (LOG.isDebugEnabled()) {
         LOG.debug("Completed compaction of " + this.storeNameStr +
-            " store size is " + StringUtils.humanReadableInt(storeSize));
+          " store size is " + StringUtils.humanReadableInt(storeSize));
       }
     }
     return checkSplit();
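
To make the new selection rule concrete, here is a standalone sketch of the skip loop above run against made-up sizes. The class name and sample values are illustrative only, and compactionThreshold is assumed at its hbase.hstore.compactionThreshold default of 3:

import java.util.Arrays;

/**
 * Worked example of the HBASE-745 skip loop. Sizes are ordered oldest
 * (and typically largest) first; the values are invented for illustration.
 */
public class CompactionSelectionSketch {
  public static void main(String[] args) {
    long[] fileSizes = { 100, 40, 20, 10 };
    int compactionThreshold = 3;
    long skipped = 0;
    int point = 0;
    // Skip a file while it is at least twice the size of the next one,
    // for at most compactionThreshold - 1 skips.
    for (point = 0; point < compactionThreshold - 1; point++) {
      if (fileSizes[point] < fileSizes[point + 1] * 2) {
        break;
      }
      skipped += fileSizes[point];
    }
    // 100 >= 2 * 40 and 40 >= 2 * 20, so both skips are taken.
    System.out.println("skipped " + point + " files totalling " + skipped);
    System.out.println("compacting " + Arrays.toString(
        Arrays.copyOfRange(fileSizes, point, fileSizes.length)));
    // Prints: skipped 2 files totalling 140, then: compacting [20, 10]
  }
}

With these sizes the large, already-compacted file is left untouched and only the two small recent flushes are rewritten, which is where the memory and CPU savings of HBASE-745 come from.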

Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestCompaction.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestCompaction.java?rev=678650&r1=678649&r2=678650&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestCompaction.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestCompaction.java Mon Jul 21 22:49:36 2008
@@ -105,6 +105,7 @@
     assertTrue(cellValues.length == 3);
     r.flushcache();
     r.compactStores();
+    assertEquals(1, r.getStore(COLUMN_FAMILY_TEXT).getStorefiles().size());
    // Now assert that there are 4 versions of a record only: that's the
     // 3 versions that should be in the compacted store and then the one more
     // we added when we flushed. But could be 3 only if the flush happened
@@ -132,6 +133,7 @@
    // Assert all deleted.
     assertNull(r.get(STARTROW, COLUMN_FAMILY_TEXT, 100 /*Too many*/));
     r.flushcache();
+    assertEquals(2, r.getStore(COLUMN_FAMILY_TEXT).getStorefiles().size());
     assertNull(r.get(STARTROW, COLUMN_FAMILY_TEXT, 100 /*Too many*/));
     // Add a bit of data and flush it so we for sure have the compaction limit
     // for store files.  Usually by this time we will have but if compaction
@@ -140,7 +142,9 @@
     // content to be certain.
     createSmallerStoreFile(this.r);
     r.flushcache();
+    assertEquals(3, r.getStore(COLUMN_FAMILY_TEXT).getStorefiles().size());
     r.compactStores();
+    assertEquals(2, r.getStore(COLUMN_FAMILY_TEXT).getStorefiles().size());
     // Assert that the first row is still deleted.
     cellValues = r.get(STARTROW, COLUMN_FAMILY_TEXT, 100 /*Too many*/);
     assertNull(cellValues);

Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/util/TestMetaUtils.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/util/TestMetaUtils.java?rev=678650&r1=678649&r2=678650&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/util/TestMetaUtils.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/util/TestMetaUtils.java Mon Jul 21 22:49:36 2008
@@ -25,6 +25,7 @@
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.HConnectionManager;
 
 
 public class TestMetaUtils extends HBaseClusterTestCase {
@@ -46,12 +47,14 @@
     utils.addColumn(editTable, new HColumnDescriptor(newColumn));
     utils.deleteColumn(editTable, Bytes.toBytes(oldColumn));
     utils.shutdown();
+    // Delete cached connection info so we go get it all fresh.
+    HConnectionManager.deleteConnectionInfo();
     // Now assert columns were added and deleted.
     this.cluster = new MiniHBaseCluster(this.conf, 1);
     HTable t = new HTable(conf, editTable);
     HTableDescriptor htd = t.getTableDescriptor();
     HColumnDescriptor hcd = htd.getFamily(newColumn);
     assertTrue(hcd != null);
     assertNull(htd.getFamily(Bytes.toBytes(oldColumn)));
   }
-}
\ No newline at end of file
+}
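
The HBASE-759 failure mode and fix can be modeled in miniature: the client keeps a process-wide cache of connection info keyed by configuration, so after the mini cluster is restarted the test keeps reusing state built against the dead cluster unless the cache is cleared first, which is what the hunk's own comment says deleteConnectionInfo() is for. All names in this toy sketch are invented; only HConnectionManager.deleteConnectionInfo() in the diff above is real:

import java.util.HashMap;
import java.util.Map;

/**
 * Toy model of the stale-connection-cache problem fixed above.
 */
public class StaleConnectionSketch {
  private static final Map<String, String> CONNECTIONS = new HashMap<>();

  // Like a connection manager: hand back a cached connection if present,
  // otherwise build one against the currently live cluster.
  static String getConnection(String confKey, String liveCluster) {
    return CONNECTIONS.computeIfAbsent(confKey, k -> liveCluster);
  }

  // What the fix calls between shutdown and restart.
  static void deleteConnectionInfo() {
    CONNECTIONS.clear();
  }

  public static void main(String[] args) {
    System.out.println(getConnection("conf", "cluster-1")); // cluster-1
    // Mini cluster shut down and a fresh one started...
    System.out.println(getConnection("conf", "cluster-2")); // still cluster-1: stale!
    deleteConnectionInfo();
    System.out.println(getConnection("conf", "cluster-2")); // cluster-2
  }
}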


