hbase-commits mailing list archives

From st...@apache.org
Subject svn commit: r735089 - in /hadoop/hbase/trunk: CHANGES.txt conf/hbase-default.xml src/java/org/apache/hadoop/hbase/HColumnDescriptor.java src/java/org/apache/hadoop/hbase/io/BlockFSInputStream.java
Date Fri, 16 Jan 2009 19:07:43 GMT
Author: stack
Date: Fri Jan 16 11:07:42 2009
New Revision: 735089

URL: http://svn.apache.org/viewvc?rev=735089&view=rev
Log:
HBASE-1127 OOME running randomRead PE

Modified:
    hadoop/hbase/trunk/CHANGES.txt
    hadoop/hbase/trunk/conf/hbase-default.xml
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HColumnDescriptor.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/BlockFSInputStream.java

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=735089&r1=735088&r2=735089&view=diff
==============================================================================
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Fri Jan 16 11:07:42 2009
@@ -143,6 +143,7 @@
    HBASE-1125  IllegalStateException: Cannot set a region to be closed if it was
                not already marked as pending close
    HBASE-1124  Balancer kicks in way too early
+   HBASE-1127  OOME running randomRead PE
 
   IMPROVEMENTS
    HBASE-901   Add a limit to key length, check key and value length on client side

Modified: hadoop/hbase/trunk/conf/hbase-default.xml
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/conf/hbase-default.xml?rev=735089&r1=735088&r2=735089&view=diff
==============================================================================
--- hadoop/hbase/trunk/conf/hbase-default.xml (original)
+++ hadoop/hbase/trunk/conf/hbase-default.xml Fri Jan 16 11:07:42 2009
@@ -319,8 +319,7 @@
     <description>The size of each block in the block cache.
     Enable blockcaching on a per column family basis; see the BLOCKCACHE setting
     in HColumnDescriptor.  Blocks are kept in a java Soft Reference cache so are
-    let go when high pressure on memory.  Block caching is enabled by default
-    as of hbase 0.19.0.
+    let go when high pressure on memory.  Block caching is not enabled by default.
     </description>
   </property>
   <property>

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HColumnDescriptor.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HColumnDescriptor.java?rev=735089&r1=735088&r2=735089&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HColumnDescriptor.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HColumnDescriptor.java Fri Jan 16 11:07:42 2009
@@ -103,7 +103,7 @@
   /**
    * Default setting for whether to use a block cache or not.
    */
-  public static final boolean DEFAULT_BLOCKCACHE = true;
+  public static final boolean DEFAULT_BLOCKCACHE = false;
 
   /**
    * Default setting for whether or not to use bloomfilters.

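For context (not part of the commit): with DEFAULT_BLOCKCACHE now false, a client that still wants block caching has to turn it on per column family, as the hbase-default.xml description above suggests. A minimal sketch follows; the setBlockCacheEnabled setter and the table/family names are assumptions about the 0.19-era HColumnDescriptor API, so check the class itself before relying on them.

    // Hypothetical sketch: opting one family back into block caching now that
    // the cluster-wide default is off. Setter and names are assumed, not from
    // the commit.
    HTableDescriptor table = new HTableDescriptor("testtable");
    HColumnDescriptor family = new HColumnDescriptor("info:");
    family.setBlockCacheEnabled(true);   // DEFAULT_BLOCKCACHE is now false
    table.addFamily(family);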
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/BlockFSInputStream.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/BlockFSInputStream.java?rev=735089&r1=735088&r2=735089&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/BlockFSInputStream.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/BlockFSInputStream.java Fri Jan 16 11:07:42 2009
@@ -53,10 +53,9 @@
       public Thread newThread(Runnable r) {
         Thread t = new Thread(r);
         t.setDaemon(true);
-        t.setName("BlockFSInputStream referenceQueue Checker");
+        t.setName("BlockFSInputStreamReferenceQueueChecker");
         return t;
       }
-
     });
   
   /*
@@ -92,7 +91,7 @@
     }
     this.fileLength = fileLength;
     this.blockSize = blockSize;
-    // a memory-sensitive map that has soft references to values
+    // A memory-sensitive map that has soft references to values
     this.blocks = new SoftValueMap<Long, byte []>() {
       private long hits, misses;
       public byte [] get(Object key) {
@@ -111,14 +110,14 @@
     };
     // Register a Runnable that runs checkReferences on a period.
     final int hashcode = hashCode();
-    this.registration = EXECUTOR.scheduleAtFixedRate(new Runnable() {
+    this.registration = EXECUTOR.scheduleWithFixedDelay(new Runnable() {
       public void run() {
         int cleared = checkReferences();
         if (LOG.isDebugEnabled() && cleared > 0) {
-          LOG.debug("Cleared " + cleared + " in " + hashcode);
+          LOG.debug("Checker cleared " + cleared + " in " + hashcode);
         }
       }
-    }, 10, 10, TimeUnit.SECONDS);
+    }, 1, 1, TimeUnit.SECONDS);
   }
 
   @Override
@@ -214,6 +213,10 @@
     if (!this.registration.cancel(false)) {
       LOG.warn("Failed cancel of " + this.registration);
     }
+    int cleared = checkReferences();
+    if (LOG.isDebugEnabled() && cleared > 0) {
+      LOG.debug("Close cleared " + cleared + " in " + hashCode());
+    }
     if (blockStream != null) {
       blockStream.close();
       blockStream = null;
@@ -246,7 +249,7 @@
    * @return Count of references cleared.
    */
   public synchronized int checkReferences() {
-    if (closed || this.blocks == null) {
+    if (this.closed) {
       return 0;
     }
     return this.blocks.checkReferences();

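For readers skimming the BlockFSInputStream changes above, here is a minimal, self-contained sketch of the pattern the patch tightens up: block data is held behind SoftReferences so the JVM can drop it under memory pressure, and a scheduled task periodically drains the ReferenceQueue so cleared entries do not accumulate in the map (that accumulation is what fed the OOME). This is an illustration only; the class and method names below (SoftBlockCacheSketch, SoftValue) are invented for the example and are not the HBase SoftValueMap.

    import java.lang.ref.ReferenceQueue;
    import java.lang.ref.SoftReference;
    import java.util.HashMap;
    import java.util.Map;
    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class SoftBlockCacheSketch {
      // Value wrapper that remembers its key so a cleared reference can be
      // removed from the backing map once it shows up on the reference queue.
      private static class SoftValue extends SoftReference<byte[]> {
        final Long key;
        SoftValue(Long key, byte[] value, ReferenceQueue<byte[]> queue) {
          super(value, queue);
          this.key = key;
        }
      }

      private final Map<Long, SoftValue> blocks = new HashMap<Long, SoftValue>();
      private final ReferenceQueue<byte[]> queue = new ReferenceQueue<byte[]>();

      public synchronized void put(Long blockId, byte[] data) {
        blocks.put(blockId, new SoftValue(blockId, data, queue));
      }

      public synchronized byte[] get(Long blockId) {
        SoftValue ref = blocks.get(blockId);
        return ref == null ? null : ref.get();   // null once the GC clears it
      }

      // Remove map entries whose values the GC has already reclaimed; returns
      // the count cleared, mirroring checkReferences() in the patch.
      public synchronized int checkReferences() {
        int cleared = 0;
        SoftValue ref;
        while ((ref = (SoftValue) queue.poll()) != null) {
          blocks.remove(ref.key);
          cleared++;
        }
        return cleared;
      }

      public static void main(String[] args) throws InterruptedException {
        final SoftBlockCacheSketch cache = new SoftBlockCacheSketch();
        ScheduledExecutorService executor =
            Executors.newSingleThreadScheduledExecutor();
        // scheduleWithFixedDelay (as in the patch) waits the full delay after
        // each run completes, so a slow cleanup cannot cause runs to bunch up
        // the way scheduleAtFixedRate can.
        executor.scheduleWithFixedDelay(new Runnable() {
          public void run() {
            cache.checkReferences();
          }
        }, 1, 1, TimeUnit.SECONDS);

        cache.put(0L, new byte[64 * 1024]);
        Thread.sleep(2000);
        executor.shutdownNow();
      }
    }

The close() hunk in the patch does one last explicit checkReferences() pass before tearing the stream down; the sketch would mirror that by calling cache.checkReferences() just before shutting the executor down.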

