From st...@apache.org
Subject svn commit: r782501 - in /hadoop/hbase/trunk: ./ src/java/org/apache/hadoop/hbase/ src/java/org/apache/hadoop/hbase/regionserver/ src/test/org/apache/hadoop/hbase/regionserver/
Date Mon, 08 Jun 2009 01:48:06 GMT
Author: stack
Date: Mon Jun  8 01:48:06 2009
New Revision: 782501

URL: http://svn.apache.org/viewvc?rev=782501&view=rev
Log:
HBASE-1486 BLOCKCACHE always on even when disabled
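This change makes the per-family BLOCKCACHE attribute actually take effect when store files are opened: the flag set on the HColumnDescriptor is now carried through Store into each StoreFile. As a rough illustration only (the setter name setBlockCacheEnabled is assumed here and does not appear in this diff), a client that wants caching off for one family would do something like:

    // Sketch, not part of this commit: disable the block cache for one family.
    HColumnDescriptor family = new HColumnDescriptor(Bytes.toBytes("colfam1"));
    family.setBlockCacheEnabled(false);   // before this fix, reads used the cache anyway
    HTableDescriptor table = new HTableDescriptor("testtable");
    table.addFamily(family);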

Modified:
    hadoop/hbase/trunk/CHANGES.txt
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HColumnDescriptor.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Store.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestStoreFile.java

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=782501&r1=782500&r2=782501&view=diff
==============================================================================
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Mon Jun  8 01:48:06 2009
@@ -165,6 +165,7 @@
                (since hbase-1430); should be other way round
    HBASE-1493  New TableMapReduceUtil methods should be static (Billy Pearson
                via Andrew Purtell)
+   HBASE-1486  BLOCKCACHE always on even when disabled (Lars George via Stack)
 
   IMPROVEMENTS
    HBASE-1089  Add count of regions on filesystem to master UI; add percentage

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HColumnDescriptor.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HColumnDescriptor.java?rev=782501&r1=782500&r2=782501&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HColumnDescriptor.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HColumnDescriptor.java Mon Jun  8 01:48:06 2009
@@ -108,7 +108,7 @@
   /**
    * Default setting for whether to use a block cache or not.
    */
-  public static final boolean DEFAULT_BLOCKCACHE = false;
+  public static final boolean DEFAULT_BLOCKCACHE = true;
 
   /**
    * Default size of blocks in files store to the filesytem.  Use smaller for
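With DEFAULT_BLOCKCACHE flipped to true, a family descriptor that never sets BLOCKCACHE explicitly now reports caching as enabled; turning it off requires an explicit opt-out. A minimal check, assuming the byte[] family-name constructor (not shown in this diff) applies the class defaults:

    // Sketch: a freshly built descriptor should reflect the new default.
    HColumnDescriptor family = new HColumnDescriptor(Bytes.toBytes("colfam1"));
    assert HColumnDescriptor.DEFAULT_BLOCKCACHE;   // true after this change
    assert family.isBlockCacheEnabled();           // picks up the default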

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Store.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Store.java?rev=782501&r1=782500&r2=782501&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Store.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Store.java Mon Jun  8 01:48:06 2009
@@ -130,6 +130,7 @@
   private final Object compactLock = new Object();
   private final int compactionThreshold;
   private final int blocksize;
+  private final boolean blockcache;
   private final boolean bloomfilter;
   private final Compression.Algorithm compression;
   
@@ -162,6 +163,7 @@
     this.fs = fs;
     this.conf = conf;
     this.bloomfilter = family.isBloomfilter();
+    this.blockcache = family.isBlockCacheEnabled();
     this.blocksize = family.getBlocksize();
     this.compression = family.getCompression();
     this.comparator = info.getComparator();
@@ -360,7 +362,7 @@
       }
       StoreFile curfile = null;
       try {
-        curfile = new StoreFile(fs, p, this.conf);
+        curfile = new StoreFile(fs, p, blockcache, this.conf);
       } catch (IOException ioe) {
         LOG.warn("Failed open of " + p + "; presumption is that file was " +
           "corrupted at flush and lost edits picked up by commit log replay. " +
@@ -516,7 +518,8 @@
         writer.close();
       }
     }
-    StoreFile sf = new StoreFile(this.fs, writer.getPath(), this.conf);
+    StoreFile sf = new StoreFile(this.fs, writer.getPath(), blockcache, 
+      this.conf);
     this.storeSize += sf.getReader().length();
     if(LOG.isDebugEnabled()) {
       LOG.debug("Added " + sf + ", entries=" + sf.getReader().getEntries() +
@@ -890,7 +893,8 @@
       LOG.error("Failed move of compacted file " + compactedFile.getPath(), e);
       return;
     }
-    StoreFile finalCompactedFile = new StoreFile(this.fs, p, this.conf);
+    StoreFile finalCompactedFile = new StoreFile(this.fs, p, blockcache, 
+      this.conf);
     this.lock.writeLock().lock();
     try {
       try {

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/StoreFile.java?rev=782501&r1=782500&r2=782501&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/StoreFile.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/StoreFile.java Mon Jun  8 01:48:06 2009
@@ -71,7 +71,9 @@
   private Reference reference;
   // If this StoreFile references another, this is the other files path.
   private Path referencePath;
-
+  // Should the block cache be used or not.
+  private boolean blockcache;
+  
   // Keys for metadata stored in backing HFile.
   private static final byte [] MAX_SEQ_ID_KEY = Bytes.toBytes("MAX_SEQ_ID_KEY");
   // Set when we obtain a Reader.
@@ -100,22 +102,25 @@
   /**
    * Constructor, loads a reader and it's indices, etc. May allocate a 
    * substantial amount of ram depending on the underlying files (10-20MB?).
-   * @param fs
-   * @param p
-   * @param conf
-   * @throws IOException
+   * 
+   * @param fs  The current file system to use.
+   * @param p  The path of the file.
+   * @param blockcache  <code>true</code> if the block cache is enabled.
+   * @param conf  The current configuration.
+   * @throws IOException When opening the reader fails.
    */
-  StoreFile(final FileSystem fs, final Path p, final HBaseConfiguration conf) 
+  StoreFile(final FileSystem fs, final Path p, final boolean blockcache, 
+      final HBaseConfiguration conf) 
   throws IOException {
     this.conf = conf;
     this.fs = fs;
     this.path = p;
+    this.blockcache = blockcache;
     if (isReference(p)) {
       this.reference = Reference.read(fs, p);
       this.referencePath = getReferredToFile(this.path);
     }
     this.reader = open();
-
   }
 
   /**
@@ -229,7 +234,7 @@
    * @return the blockcache
    */
   public BlockCache getBlockCache() {
-    return getBlockCache(conf);
+    return blockcache ? getBlockCache(conf) : null;
   }
 
   /**
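With this hunk, getBlockCache() returns null when caching is disabled for the family, so whatever hands the cache to the HFile reader must tolerate a null value rather than assuming a cache instance is always available. A hypothetical caller (not taken from this diff) would guard it like:

    // Sketch: a null cache now means the block cache is disabled for this family.
    BlockCache cache = storeFile.getBlockCache();
    if (cache == null) {
      LOG.debug("Block cache disabled for " + storeFile.getPath());
    }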

Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestStoreFile.java?rev=782501&r1=782500&r2=782501&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestStoreFile.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestStoreFile.java Mon Jun  8 01:48:06 2009
@@ -74,7 +74,7 @@
       new Path(new Path(this.testDir, "regionname"), "familyname"),
       2 * 1024, null, null);
     writeStoreFile(writer);
-    checkHalfHFile(new StoreFile(this.fs, writer.getPath(), conf));
+    checkHalfHFile(new StoreFile(this.fs, writer.getPath(), true, conf));
   }
   
   /*
@@ -113,7 +113,7 @@
     HFile.Writer writer = StoreFile.getWriter(this.fs, dir, 8 * 1024, null,
       null);
     writeStoreFile(writer);
-    StoreFile hsf = new StoreFile(this.fs, writer.getPath(), conf);
+    StoreFile hsf = new StoreFile(this.fs, writer.getPath(), true, conf);
     HFile.Reader reader = hsf.getReader();
     // Split on a row, not in middle of row.  Midkey returned by reader
     // may be in middle of row.  Create new one with empty column and
@@ -124,7 +124,7 @@
     byte [] finalKey = hsk.getRow();
     // Make a reference
     Path refPath = StoreFile.split(fs, dir, hsf, reader.midkey(), Range.top);
-    StoreFile refHsf = new StoreFile(this.fs, refPath, conf);
+    StoreFile refHsf = new StoreFile(this.fs, refPath, true, conf);
     // Now confirm that I can read from the reference and that it only gets
     // keys from top half of the file.
     HFileScanner s = refHsf.getReader().getScanner();
@@ -158,8 +158,8 @@
     Path bottomPath = StoreFile.split(this.fs, bottomDir,
       f, midkey, Range.bottom);
     // Make readers on top and bottom.
-    HFile.Reader top = new StoreFile(this.fs, topPath, conf).getReader();
-    HFile.Reader bottom = new StoreFile(this.fs, bottomPath, conf).getReader();
+    HFile.Reader top = new StoreFile(this.fs, topPath, true, conf).getReader();
+    HFile.Reader bottom = new StoreFile(this.fs, bottomPath, true, conf).getReader();
     ByteBuffer previous = null;
     LOG.info("Midkey: " + Bytes.toString(midkey));
     byte [] midkeyBytes = new HStoreKey(midkey).getBytes();
@@ -212,8 +212,8 @@
       topPath = StoreFile.split(this.fs, topDir, f, badmidkey, Range.top);
       bottomPath = StoreFile.split(this.fs, bottomDir, f, badmidkey,
         Range.bottom);
-      top = new StoreFile(this.fs, topPath, conf).getReader();
-      bottom = new StoreFile(this.fs, bottomPath, conf).getReader();
+      top = new StoreFile(this.fs, topPath, true, conf).getReader();
+      bottom = new StoreFile(this.fs, bottomPath, true, conf).getReader();
       bottomScanner = bottom.getScanner();
       int count = 0;
       while ((!bottomScanner.isSeeked() && bottomScanner.seekTo()) ||
@@ -256,8 +256,8 @@
       topPath = StoreFile.split(this.fs, topDir, f, badmidkey, Range.top);
       bottomPath = StoreFile.split(this.fs, bottomDir, f, badmidkey,
         Range.bottom);
-      top = new StoreFile(this.fs, topPath, conf).getReader();
-      bottom = new StoreFile(this.fs, bottomPath, conf).getReader();
+      top = new StoreFile(this.fs, topPath, true, conf).getReader();
+      bottom = new StoreFile(this.fs, bottomPath, true, conf).getReader();
       first = true;
       bottomScanner = bottom.getScanner();
       while ((!bottomScanner.isSeeked() && bottomScanner.seekTo()) ||


