hbase-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From st...@apache.org
Subject svn commit: r726153 - in /hadoop/hbase/trunk: ./ conf/ src/java/org/apache/hadoop/hbase/ src/java/org/apache/hadoop/hbase/client/ src/java/org/apache/hadoop/hbase/io/ src/java/org/apache/hadoop/hbase/regionserver/ src/java/org/apache/hadoop/hbase/util/
Date Sat, 13 Dec 2008 01:15:40 GMT
Author: stack
Date: Fri Dec 12 17:15:39 2008
New Revision: 726153

URL: http://svn.apache.org/viewvc?rev=726153&view=rev
Log:
HBASE-900 Regionserver memory leak causing OOME during relatively modest bulk importing; part
2

Added:
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/HeapSize.java
Modified:
    hadoop/hbase/trunk/CHANGES.txt
    hadoop/hbase/trunk/conf/hbase-default.xml
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HStoreKey.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HConnectionManager.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HTable.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/BatchOperation.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/BatchUpdate.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStore.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Memcache.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/Bytes.java

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=726153&r1=726152&r2=726153&view=diff
==============================================================================
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Fri Dec 12 17:15:39 2008
@@ -102,7 +102,7 @@
    HBASE-1000  Sleeper.sleep does not go back to sleep when interrupted
                and no stop flag given.
    HBASE-900   Regionserver memory leak causing OOME during relatively
-               modest bulk importing; part 1
+               modest bulk importing; part 1 and part 2
    HBASE-1054  Index NPE on scanning (Clint Morgan via Andrew Purtell)
    HBASE-1052  Stopping a HRegionServer with unflushed cache causes data loss
                from org.apache.hadoop.hbase.DroppedSnapshotException

Modified: hadoop/hbase/trunk/conf/hbase-default.xml
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/conf/hbase-default.xml?rev=726153&r1=726152&r2=726153&view=diff
==============================================================================
--- hadoop/hbase/trunk/conf/hbase-default.xml (original)
+++ hadoop/hbase/trunk/conf/hbase-default.xml Fri Dec 12 17:15:39 2008
@@ -53,9 +53,12 @@
   </property>
   <property>
     <name>hbase.client.write.buffer</name>
-    <value>10485760</value>
+    <value>2097152</value>
     <description>Size of the write buffer in bytes. A bigger buffer takes more
-    memory but reduces the number of RPC.
+    memory -- on both the client and server side since server instantiates
+    the passed write buffer to process it -- but reduces the number of RPCs.
+    For an estimate of server-side memory-used, evaluate
+    hbase.client.write.buffer * hbase.regionserver.handler.count
     </description>
   </property>
   <property>

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HStoreKey.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HStoreKey.java?rev=726153&r1=726152&r2=726153&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HStoreKey.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HStoreKey.java Fri Dec 12 17:15:39
2008
@@ -24,6 +24,7 @@
 import java.io.DataOutput;
 import java.io.IOException;
 
+import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.WritableComparable;
 import org.apache.hadoop.io.WritableComparator;
@@ -31,7 +32,7 @@
 /**
  * A Key for a stored row.
  */
-public class HStoreKey implements WritableComparable<HStoreKey> {
+public class HStoreKey implements WritableComparable<HStoreKey>, HeapSize {
   /**
    * Colon character in UTF-8
    */
@@ -46,6 +47,14 @@
    * It is not serialized.  See https://issues.apache.org/jira/browse/HBASE-832
    */
   private HRegionInfo regionInfo = null;
+  
+  /**
+   * Estimated size tax paid for each instance of HSK.  Estimate based on
+   * study of jhat and jprofiler numbers.
+   */
+  // In jprofiler, says shallow size is 48 bytes.  Add to it cost of two
+  // byte arrays and then something for the HRI hosting.
+  public static final int ESTIMATED_HEAP_TAX = 48;
 
   /** Default constructor used in conjunction with Writable interface */
   public HStoreKey() {
@@ -200,12 +209,7 @@
     this.timestamp = timestamp;
     this.regionInfo = regionInfo;
   }
-  
-  /** @return Approximate size in bytes of this key. */
-  public long getSize() {
-    return getRow().length + getColumn().length + Bytes.SIZEOF_LONG;
-  }
-  
+
   /**
    * Constructs a new HStoreKey from another
    * 
@@ -586,7 +590,13 @@
     this.column = Bytes.readByteArray(in);
     this.timestamp = in.readLong();
   }
-  
+
+  public long heapSize() {
+    return getRow().length + Bytes.ESTIMATED_HEAP_TAX +
+      getColumn().length + Bytes.ESTIMATED_HEAP_TAX +
+      ESTIMATED_HEAP_TAX;
+  }
+
   /**
    * Passed as comparator for memcache and for store files.  See HBASE-868.
    */
@@ -649,8 +659,8 @@
     }
 
     @Override
-    public long getSize() {
-      return this.beforeThisKey.getSize();
+    public long heapSize() {
+      return this.beforeThisKey.heapSize();
     }
 
     @Override

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HConnectionManager.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HConnectionManager.java?rev=726153&r1=726152&r2=726153&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HConnectionManager.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HConnectionManager.java Fri
Dec 12 17:15:39 2008
@@ -854,9 +854,6 @@
             throw new RetriesExhaustedException(callable.getServerName(),
                 callable.getRegionName(), callable.getRow(), tries, exceptions);
           }
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("reloading table servers because: " + t.getMessage());
-          }
         }
         try {
           Thread.sleep(getPauseTime(tries));

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HTable.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HTable.java?rev=726153&r1=726152&r2=726153&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HTable.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HTable.java Fri Dec 12 17:15:39
2008
@@ -113,7 +113,7 @@
     this.connection.locateRegion(tableName, HConstants.EMPTY_START_ROW);
     this.writeBuffer = new ArrayList<BatchUpdate>();
     this.writeBufferSize = 
-      this.configuration.getLong("hbase.client.write.buffer", 10485760);
+      this.configuration.getLong("hbase.client.write.buffer", 2097152);
     this.autoFlush = true;
     this.currentWriteBufferSize = 0;
     this.scannerCaching = conf.getInt("hbase.client.scanner.caching", 30);
@@ -1233,8 +1233,8 @@
       batchUpdate.setRowLock(rl.getLockId());
     }
     writeBuffer.add(batchUpdate);
-    currentWriteBufferSize += batchUpdate.getSize();
-    if(autoFlush || currentWriteBufferSize > writeBufferSize) {
+    currentWriteBufferSize += batchUpdate.heapSize();
+    if (autoFlush || currentWriteBufferSize > writeBufferSize) {
       flushCommits();
     }
   }
@@ -1247,12 +1247,12 @@
    */ 
   public synchronized void commit(final List<BatchUpdate> batchUpdates)
       throws IOException {
-    for(BatchUpdate bu : batchUpdates) {
+    for (BatchUpdate bu : batchUpdates) {
       checkRowAndColumns(bu);
       writeBuffer.add(bu);
-      currentWriteBufferSize += bu.getSize();
+      currentWriteBufferSize += bu.heapSize();
     }
-    if(autoFlush || currentWriteBufferSize > writeBufferSize) {
+    if (autoFlush || currentWriteBufferSize > writeBufferSize) {
       flushCommits();
     }
   }

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/BatchOperation.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/BatchOperation.java?rev=726153&r1=726152&r2=726153&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/BatchOperation.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/BatchOperation.java Fri Dec 12
17:15:39 2008
@@ -35,11 +35,17 @@
  * a class per type because it makes the serialization easier.
  * @see BatchUpdate 
  */
-public class BatchOperation implements Writable {
+public class BatchOperation implements Writable, HeapSize {
+  /**
+   * Estimated size of this object.
+   */
+  // JHat says this is 32 bytes.
+  public final int ESTIMATED_HEAP_TAX = 36;
+  
   private byte [] column = null;
   
   // A null value defines DELETE operations.
-  private byte[] value = null;
+  private byte [] value = null;
   
   /**
    * Default constructor
@@ -132,4 +138,9 @@
       out.write(value);
     }
   }
+  
+  public long heapSize() {
+    return Bytes.ESTIMATED_HEAP_TAX * 2 + this.column.length +
+      this.value.length + ESTIMATED_HEAP_TAX;
+  }
 }
\ No newline at end of file

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/BatchUpdate.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/BatchUpdate.java?rev=726153&r1=726152&r2=726153&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/BatchUpdate.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/BatchUpdate.java Fri Dec 12 17:15:39
2008
@@ -22,12 +22,15 @@
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
+import java.lang.management.ManagementFactory;
+import java.lang.management.RuntimeMXBean;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Iterator;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.client.RowLock;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.WritableComparable;
 
@@ -38,8 +41,15 @@
  * can result in multiple BatchUpdate objects if the batch contains rows that
  * are served by multiple region servers.
  */
-public class BatchUpdate implements WritableComparable<BatchUpdate>, 
-  Iterable<BatchOperation> {
+public class BatchUpdate
+implements WritableComparable<BatchUpdate>, Iterable<BatchOperation>, HeapSize
{
+  private static final Log LOG = LogFactory.getLog(BatchUpdate.class);
+  
+  /**
+   * Estimated 'shallow size' of this object not counting payload.
+   */
+  // Shallow size is 56.  Add 32 for the arraylist below.
+  public static final int ESTIMATED_HEAP_TAX = 56 + 32;
   
   // the row being updated
   private byte [] row = null;
@@ -142,13 +152,6 @@
   }
 
   /**
-   * @return BatchUpdate size in bytes.
-   */
-  public long getSize() {
-    return size;
-  }
-
-  /**
    * @return the timestamp this BatchUpdate will be committed with.
    */
   public long getTimestamp() {
@@ -247,8 +250,9 @@
       // If null, the PUT becomes a DELETE operation.
       throw new IllegalArgumentException("Passed value cannot be null");
     }
-    size += val.length + column.length;
-    operations.add(new BatchOperation(column, val));
+    BatchOperation bo = new BatchOperation(column, val);
+    this.size += bo.heapSize();
+    operations.add(bo);
   }
 
   /** 
@@ -336,4 +340,48 @@
   public int compareTo(BatchUpdate o) {
     return Bytes.compareTo(this.row, o.getRow());
   }
-}
+
+  public long heapSize() {
+    return this.row.length + Bytes.ESTIMATED_HEAP_TAX + this.size +
+      ESTIMATED_HEAP_TAX;
+  }
+  
+  /**
+   * Code to test sizes of BatchUpdate arrays.
+   * @param args
+   * @throws InterruptedException
+   */
+  public static void main(String[] args) throws InterruptedException {
+    RuntimeMXBean runtime = ManagementFactory.getRuntimeMXBean();
+    LOG.info("vmName=" + runtime.getVmName() + ", vmVendor="
+        + runtime.getVmVendor() + ", vmVersion=" + runtime.getVmVersion());
+    LOG.info("vmInputArguments=" + runtime.getInputArguments());
+    final int count = 10000;
+    BatchUpdate[] batch1 = new BatchUpdate[count];
+    // TODO: x32 vs x64
+    long size = 0;
+    for (int i = 0; i < count; i++) {
+      BatchUpdate bu = new BatchUpdate(HConstants.EMPTY_BYTE_ARRAY);
+      bu.put(HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY);
+      batch1[i] = bu;
+      size += bu.heapSize();
+    }
+    LOG.info("batch1 estimated size=" + size);
+    // Make a variably sized memcache.
+    size = 0;
+    BatchUpdate[] batch2 = new BatchUpdate[count];
+    for (int i = 0; i < count; i++) {
+      BatchUpdate bu = new BatchUpdate(Bytes.toBytes(i));
+      bu.put(Bytes.toBytes(i), new byte[i]);
+      batch2[i] = bu;
+      size += bu.heapSize();
+    }
+    LOG.info("batch2 estimated size=" + size);
+    final int seconds = 30;
+    LOG.info("Waiting " + seconds + " seconds while heap dump is taken");
+    for (int i = 0; i < seconds; i++) {
+      Thread.sleep(1000);
+    }
+    LOG.info("Exiting.");
+  }
+}
\ No newline at end of file

Added: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/HeapSize.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/HeapSize.java?rev=726153&view=auto
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/HeapSize.java (added)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/HeapSize.java Fri Dec 12 17:15:39
2008
@@ -0,0 +1,33 @@
+/**
+ * Copyright 2008 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.io;
+
+/**
+ * Implementations can be asked for an estimate of their size in bytes.
+ * Useful for sizing caches.  It's a given that implementation approximations
+ * probably do not account for 32 vs 64 bit nor for different VM implementations.
+ */
+public interface HeapSize {
+  /**
+   * @return Approximate 'exclusive deep size' of implementing object.  Includes
+   * count of payload and hosting object sizings.
+   */
+  public long heapSize();
+}
\ No newline at end of file

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStore.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStore.java?rev=726153&r1=726152&r2=726153&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStore.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStore.java Fri Dec 12
17:15:39 2008
@@ -634,8 +634,9 @@
     return compactionNeeded;
   }
   
-  private boolean internalFlushCache(SortedMap<HStoreKey, byte []> cache,
-      long logCacheFlushId) throws IOException {
+  private boolean internalFlushCache(final SortedMap<HStoreKey, byte []> cache,
+    final long logCacheFlushId)
+  throws IOException {
     long flushed = 0;
     // Don't flush if there are no entries.
     if (cache.size() == 0) {
@@ -674,7 +675,7 @@
             if (!isExpired(curkey, ttl, now)) {
               entries++;
               out.append(curkey, new ImmutableBytesWritable(bytes));
-              flushed += curkey.getSize() + (bytes == null ? 0 : bytes.length);
+              flushed += this.memcache.heapSize(curkey, bytes, null);
             }
           }
         }
@@ -693,7 +694,7 @@
       if(LOG.isDebugEnabled()) {
         LOG.debug("Added " + FSUtils.getPath(flushedFile.getMapFilePath()) +
           " with " + entries +
-          " entries, sequence id " + logCacheFlushId + ", data size " +
+          " entries, sequence id " + logCacheFlushId + ", data size ~" +
           StringUtils.humanReadableInt(flushed) + ", file size " +
           StringUtils.humanReadableInt(newStoreSize) + " to " +
           this.info.getRegionNameAsString());

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Memcache.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Memcache.java?rev=726153&r1=726152&r2=726153&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Memcache.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Memcache.java Fri Dec
12 17:15:39 2008
@@ -21,6 +21,8 @@
 package org.apache.hadoop.hbase.regionserver;
 
 import java.io.IOException;
+import java.lang.management.ManagementFactory;
+import java.lang.management.RuntimeMXBean;
 import java.rmi.UnexpectedException;
 import java.util.ArrayList;
 import java.util.Collections;
@@ -52,7 +54,7 @@
  * this point we let the snapshot go.
  */
 class Memcache {
-  private final Log LOG = LogFactory.getLog(this.getClass().getName());
+  private static final Log LOG = LogFactory.getLog(Memcache.class);
   
   private final long ttl;
   
@@ -170,18 +172,48 @@
    * Write an update
    * @param key
    * @param value
-   * @return memcache size delta
+   * @return Approximate size of the passed key and value.  Includes
+   * cost of hosting HSK and byte arrays as well as the Map.Entry this addition
+   * costs when we insert into the backing TreeMap.
    */
   long add(final HStoreKey key, final byte[] value) {
+    long size = -1;
     this.lock.readLock().lock();
     try {
-      byte[] oldValue = this.memcache.remove(key);
+      byte [] oldValue = this.memcache.remove(key);
       this.memcache.put(key, value);
-      return key.getSize() + (value == null ? 0 : value.length) -
-          (oldValue == null ? 0 : oldValue.length);
+      size = heapSize(key, value, oldValue);
     } finally {
       this.lock.readLock().unlock();
     }
+    return size;
+  }
+  
+  /*
+   * Calculate how the memcache size has changed, approximately.
+   * Add in tax of TreeMap.Entry.
+   * @param key
+   * @param value
+   * @param oldValue
+   * @return Estimated change in heap size, in bytes, caused by this update.
+   */
+  long heapSize(final HStoreKey key, final byte [] value,
+      final byte [] oldValue) {
+    // First add value length.
+    long keySize = key.heapSize();
+    // Add value.
+    long size = value == null? 0: value.length;
+    if (oldValue == null) {
+      size += keySize;
+      // Add overhead for value byte array and for Map.Entry -- 57 bytes
+      // on x64 according to jprofiler.
+      size += Bytes.ESTIMATED_HEAP_TAX + 57;
+    } else {
+      // If old value, don't add overhead again nor key size. Just add
+      // difference in value sizes.
+      size -= oldValue.length;
+    }
+    return size;
   }
 
   /**
@@ -835,4 +867,47 @@
       }
     }
   }
-}
+
+  /**
+   * Code to help figure if our approximation of object heap sizes is close
+   * enough.  See hbase-900.  Fills memcaches then waits so user can heap
+   * dump and bring up resultant hprof in something like jprofiler which
+   * allows you get 'deep size' on objects.
+   * @param args
+   * @throws InterruptedException
+   */
+  public static void main(String [] args) throws InterruptedException {
+    RuntimeMXBean runtime = ManagementFactory.getRuntimeMXBean();
+    LOG.info("vmName=" + runtime.getVmName() + ", vmVendor=" +
+      runtime.getVmVendor() + ", vmVersion=" + runtime.getVmVersion());
+    LOG.info("vmInputArguments=" + runtime.getInputArguments());
+    Memcache memcache1 = new Memcache();
+    // TODO: x32 vs x64
+    long size = 0;
+    final int count = 10000;
+    for (int i = 0; i < count; i++) {
+      size += memcache1.add(new HStoreKey(Bytes.toBytes(i)),
+        HConstants.EMPTY_BYTE_ARRAY);
+    }
+    LOG.info("memcache1 estimated size=" + size);
+    for (int i = 0; i < count; i++) {
+      size += memcache1.add(new HStoreKey(Bytes.toBytes(i)),
+        HConstants.EMPTY_BYTE_ARRAY);
+    }
+    LOG.info("memcache1 estimated size (2nd loading of same data)=" + size);
+    // Make a variably sized memcache.
+    Memcache memcache2 = new Memcache();
+    for (int i = 0; i < count; i++) {
+      byte [] b = Bytes.toBytes(i);
+      size += memcache2.add(new HStoreKey(b, b),
+        new byte [i]);
+    }
+    LOG.info("memcache2 estimated size=" + size);
+    final int seconds = 30;
+    LOG.info("Waiting " + seconds + " seconds while heap dump is taken");
+    for (int i = 0; i < seconds; i++) {
+      Thread.sleep(1000);
+    }
+    LOG.info("Exiting.");
+  }
+}
\ No newline at end of file

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/Bytes.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/Bytes.java?rev=726153&r1=726152&r2=726153&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/Bytes.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/Bytes.java Fri Dec 12 17:15:39
2008
@@ -37,6 +37,13 @@
    * Size of double in bytes
    */
   public static final int SIZEOF_DOUBLE = Double.SIZE/Byte.SIZE;
+  
+  /**
+   * Estimate of size cost to pay beyond payload in jvm for instance of byte [].
+   * Estimate based on study of jhat and jprofiler numbers.
+   */
+  // JHat says BU is 56 bytes.
+  public static final int ESTIMATED_HEAP_TAX = 16;
 
   /**
    * Pass this to TreeMaps where byte [] are keys.



Mime
View raw message