hbase-commits mailing list archives

From apurt...@apache.org
Subject svn commit: r997168 - in /hbase/trunk: CHANGES.txt src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java src/main/java/org/apache/hadoop/hbase/regionserver/Store.java src/main/ruby/hbase/admin.rb
Date Wed, 15 Sep 2010 02:01:56 GMT
Author: apurtell
Date: Wed Sep 15 02:01:56 2010
New Revision: 997168

URL: http://svn.apache.org/viewvc?rev=997168&view=rev
Log:
HBASE-2988 Support alternate compression for major compactions
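
The new COMPRESSION_COMPACT column family attribute lets a table keep fast,
lightly compressed flush and minor-compaction output while major compactions
rewrite the whole store with a heavier codec. A minimal sketch of the intended
API usage (table and family names are illustrative, and a running cluster is
assumed; this example is not part of the commit itself):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.client.HBaseAdmin;
    import org.apache.hadoop.hbase.io.hfile.Compression;

    public class CompactionCompressionExample {
      public static void main(String[] args) throws Exception {
        HTableDescriptor htd = new HTableDescriptor("t1");
        HColumnDescriptor hcd = new HColumnDescriptor("cf");
        // Flushes and minor compactions write uncompressed files ...
        hcd.setCompressionType(Compression.Algorithm.NONE);
        // ... while major compactions rewrite the store with GZ.
        hcd.setCompactionCompressionType(Compression.Algorithm.GZ);
        htd.addFamily(hcd);
        new HBaseAdmin(HBaseConfiguration.create()).createTable(htd);
      }
    }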

Modified:
    hbase/trunk/CHANGES.txt
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
    hbase/trunk/src/main/ruby/hbase/admin.rb

Modified: hbase/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hbase/trunk/CHANGES.txt?rev=997168&r1=997167&r2=997168&view=diff
==============================================================================
--- hbase/trunk/CHANGES.txt (original)
+++ hbase/trunk/CHANGES.txt Wed Sep 15 02:01:56 2010
@@ -906,6 +906,7 @@ Release 0.21.0 - Unreleased
                'IllegalArgumentException: Wrong FS'
    HBASE-2977  Refactor master command line to a new class
    HBASE-2980  Refactor region server command line to a new class
+   HBASE-2988  Support alternate compression for major compactions
 
   NEW FEATURES
    HBASE-1961  HBase EC2 scripts

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java?rev=997168&r1=997167&r2=997168&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java Wed Sep 15 02:01:56 2010
@@ -72,6 +72,7 @@ public class HColumnDescriptor implement
   }
 
   public static final String COMPRESSION = "COMPRESSION";
+  public static final String COMPRESSION_COMPACT = "COMPRESSION_COMPACT";
   public static final String BLOCKCACHE = "BLOCKCACHE";
   public static final String BLOCKSIZE = "BLOCKSIZE";
   public static final String LENGTH = "LENGTH";
@@ -353,6 +354,19 @@ public class HColumnDescriptor implement
   /** @return compression type being used for the column family */
   public Compression.Algorithm getCompression() {
     String n = getValue(COMPRESSION);
+    if (n == null) {
+      return Compression.Algorithm.NONE;
+    }
+    return Compression.Algorithm.valueOf(n.toUpperCase());
+  }
+
+  /** @return compression type being used for the column family for major
+      compaction */
+  public Compression.Algorithm getCompactionCompression() {
+    String n = getValue(COMPRESSION_COMPACT);
+    if (n == null) {
+      return getCompression();
+    }
     return Compression.Algorithm.valueOf(n.toUpperCase());
   }
 
@@ -418,6 +432,30 @@ public class HColumnDescriptor implement
   }
 
   /**
+   * @return Compression type setting used for major compactions.
+   */
+  public Compression.Algorithm getCompactionCompressionType() {
+    return getCompactionCompression();
+  }
+
+  /**
+   * Set the compression type used when store files are written on major compaction.
+   * LZO is not bundled as part of the hbase distribution.
+   * See <a href="http://wiki.apache.org/hadoop/UsingLzoCompression">LZO Compression</a>
+   * for how to enable it.
+   * @param type Compression type setting.
+   */
+  public void setCompactionCompressionType(Compression.Algorithm type) {
+    String compressionType;
+    switch (type) {
+      case LZO: compressionType = "LZO"; break;
+      case GZ: compressionType = "GZ"; break;
+      default: compressionType = "NONE"; break;
+    }
+    setValue(COMPRESSION_COMPACT, compressionType);
+  }
+
+  /**
    * @return True if we are to keep all in use HRegionServer cache.
    */
   public boolean isInMemory() {

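
The read path falls back: getCompactionCompression() returns the
COMPRESSION_COMPACT value when present, otherwise whatever getCompression()
reports, so existing tables behave exactly as before. A small illustrative
fragment (the family name is hypothetical):

    HColumnDescriptor hcd = new HColumnDescriptor("cf");
    hcd.setCompressionType(Compression.Algorithm.LZO);
    // COMPRESSION_COMPACT unset: compactions inherit the family codec
    assert hcd.getCompactionCompression() == Compression.Algorithm.LZO;
    hcd.setCompactionCompressionType(Compression.Algorithm.GZ);
    // once set, the compaction-specific codec takes precedence
    assert hcd.getCompactionCompression() == Compression.Algorithm.GZ;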
Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java?rev=997168&r1=997167&r2=997168&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java Wed Sep 15 02:01:56 2010
@@ -112,7 +112,10 @@ public class Store implements HeapSize {
   private final int compactionThreshold;
   private final int blocksize;
   private final boolean blockcache;
+  /** Compression algorithm for flush files and minor compaction */
   private final Compression.Algorithm compression;
+  /** Compression algorithm for major compaction */
+  private final Compression.Algorithm compactionCompression;
 
   // Comparing KeyValues
   final KeyValue.KVComparator comparator;
@@ -144,6 +147,11 @@ public class Store implements HeapSize {
     this.blockcache = family.isBlockCacheEnabled();
     this.blocksize = family.getBlocksize();
     this.compression = family.getCompression();
+    // fall back to the general compression setting for major compactions
+    // if the user has not specified one separately
+    this.compactionCompression =
+      (family.getCompactionCompression() != Compression.Algorithm.NONE) ? 
+        family.getCompactionCompression() : this.compression;
     this.comparator = info.getComparator();
     // getTimeToLive returns ttl in seconds.  Convert to milliseconds.
     this.ttl = family.getTimeToLive();
@@ -487,12 +495,24 @@ public class Store implements HeapSize {
   }
 
   /*
+   * @param maxKeyCount
    * @return Writer for a new StoreFile in the tmp dir.
    */
   private StoreFile.Writer createWriterInTmp(int maxKeyCount)
   throws IOException {
+    return createWriterInTmp(maxKeyCount, this.compression);
+  }
+
+  /*
+   * @param maxKeyCount
+   * @param compression Compression algorithm to use
+   * @return Writer for a new StoreFile in the tmp dir.
+   */
+  private StoreFile.Writer createWriterInTmp(int maxKeyCount,
+    Compression.Algorithm compression)
+  throws IOException {
     return StoreFile.createWriter(this.fs, region.getTmpDir(), this.blocksize,
-        this.compression, this.comparator, this.conf,
+        compression, this.comparator, this.conf,
         this.family.getBloomFilterType(), maxKeyCount);
   }
 
@@ -804,7 +824,8 @@ public class Store implements HeapSize {
           // output to writer:
           for (KeyValue kv : kvs) {
             if (writer == null) {
-              writer = createWriterInTmp(maxKeyCount);
+              writer = createWriterInTmp(maxKeyCount, 
+                this.compactionCompression);
             }
             writer.append(kv);
           }

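
One consequence of the Store constructor's check above: NONE is treated as
"not separately specified", so an explicit COMPRESSION_COMPACT of NONE on a
family whose flushes are compressed will still compact with the family codec.
A condensed sketch of the effective selection (variable names are
illustrative, and the write-path labels follow the commit's own comments):

    Compression.Algorithm flushCodec = family.getCompression();
    Compression.Algorithm compactionCodec =
        (family.getCompactionCompression() != Compression.Algorithm.NONE)
            ? family.getCompactionCompression() // user set COMPRESSION_COMPACT
            : flushCodec;                       // NONE means "use family codec"
    // Flushes and minor compactions: createWriterInTmp(maxKeyCount) -> flushCodec
    // Major compactions: createWriterInTmp(maxKeyCount, compactionCodec)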
Modified: hbase/trunk/src/main/ruby/hbase/admin.rb
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/ruby/hbase/admin.rb?rev=997168&r1=997167&r2=997168&view=diff
==============================================================================
--- hbase/trunk/src/main/ruby/hbase/admin.rb (original)
+++ hbase/trunk/src/main/ruby/hbase/admin.rb Wed Sep 15 02:01:56 2010
@@ -132,7 +132,11 @@ module Hbase
         end
 
         # Add column to the table
-        htd.addFamily(hcd(arg))
+        descriptor = hcd(arg)
+        if arg[COMPRESSION_COMPACT]
+          descriptor.setValue(COMPRESSION_COMPACT, arg[COMPRESSION_COMPACT])
+        end
+        htd.addFamily(descriptor)
       end
 
       # Perform the create table call
@@ -216,6 +220,9 @@ module Hbase
         # No method parameter, try to use the args as a column definition
         unless method = arg.delete(METHOD)
           descriptor = hcd(arg)
+          if arg[COMPRESSION_COMPACT]
+            descriptor.setValue(COMPRESSION_COMPACT, arg[COMPRESSION_COMPACT])
+          end
           column_name = descriptor.getNameAsString
 
           # If column already exist, then try to alter it. Create otherwise.

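The shell wiring above simply copies a COMPRESSION_COMPACT argument through to
the column descriptor as a raw key/value pair, so create and alter accept it
like any other family attribute. The Java equivalent of what admin.rb does
(family name hypothetical):

    HColumnDescriptor descriptor = new HColumnDescriptor("cf");
    // same effect as the shell's {NAME => 'cf', COMPRESSION_COMPACT => 'GZ'}
    descriptor.setValue(HColumnDescriptor.COMPRESSION_COMPACT, "GZ");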

