hbase-commits mailing list archives

From mbau...@apache.org
Subject svn commit: r1395032 - in /hbase/branches/0.89-fb: bin/ src/main/java/org/apache/hadoop/hbase/ src/main/ruby/hbase/ src/test/java/org/apache/hadoop/hbase/ src/test/java/org/apache/hadoop/hbase/client/
Date Sat, 06 Oct 2012 12:50:28 GMT
Author: mbautin
Date: Sat Oct  6 12:50:28 2012
New Revision: 1395032

URL: http://svn.apache.org/viewvc?rev=1395032&view=rev
Log:
[HBASE-2376] Introduce a Flash Back Time parameter in HColumnDescriptor.

Author: pritam

Summary:
To support flash back queries in HBase, we need a new parameter at the
column family level that specifies how far back in time flash back
queries are supported.
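
For illustration only (not part of this commit), here is a minimal
sketch of how a client could configure the new limit when creating a
table. The class, table, and family names below are hypothetical, and
the snippet assumes a reachable cluster on the default configuration.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.HColumnDescriptor;
  import org.apache.hadoop.hbase.HTableDescriptor;
  import org.apache.hadoop.hbase.client.HBaseAdmin;
  import org.apache.hadoop.hbase.util.Bytes;

  public class FlashBackLimitExample {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      // Keep one day (in seconds) of history for flash back queries
      // on the "actions" family of the hypothetical "demo_table".
      HColumnDescriptor family = new HColumnDescriptor(Bytes.toBytes("actions"));
      family.setFlashBackQueryLimit(60 * 60 * 24);
      HTableDescriptor table = new HTableDescriptor(Bytes.toBytes("demo_table"));
      table.addFamily(family);
      HBaseAdmin admin = new HBaseAdmin(conf);
      try {
        admin.createTable(table);
      } finally {
        admin.close();
      }
    }
  }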

Test Plan:
1) All unit tests
2) New unit test added (testFlashBackTime).

Reviewers: kannan, kranganathan, aaiyer, mbautin

Reviewed By: kannan

CC: hbase-eng@

Differential Revision: https://phabricator.fb.com/D592709

Task ID: 1733764

Modified:
    hbase/branches/0.89-fb/bin/alter_cf_meta.rb
    hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
    hbase/branches/0.89-fb/src/main/ruby/hbase/admin.rb
    hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
    hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java

Modified: hbase/branches/0.89-fb/bin/alter_cf_meta.rb
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/bin/alter_cf_meta.rb?rev=1395032&r1=1395031&r2=1395032&view=diff
==============================================================================
--- hbase/branches/0.89-fb/bin/alter_cf_meta.rb (original)
+++ hbase/branches/0.89-fb/bin/alter_cf_meta.rb Sat Oct  6 12:50:28 2012
@@ -34,6 +34,7 @@ BLOOMFILTER = HColumnDescriptor::BLOOMFI
 BLOOMFILTER_ERRORRATE = HColumnDescriptor::BLOOMFILTER_ERRORRATE
 REPLICATION_SCOPE = HColumnDescriptor::REPLICATION_SCOPE
 TTL = HColumnDescriptor::TTL
+FLASHBACK_QUERY_LIMIT = HColumnDescriptor::FLASHBACK_QUERY_LIMIT
 COMPRESSION = HColumnDescriptor::COMPRESSION
 BLOCKSIZE = HColumnDescriptor::BLOCKSIZE
 
@@ -51,6 +52,7 @@ def getHCD(arg, htd)
   family.setScope(JInteger.valueOf(arg[REPLICATION_SCOPE])) if arg.include?(REPLICATION_SCOPE)
   family.setInMemory(JBoolean.valueOf(arg[IN_MEMORY])) if arg.include?(IN_MEMORY)
   family.setTimeToLive(JInteger.valueOf(arg[TTL])) if arg.include?(TTL)
+  family.setFlashBackQueryLimit(JInteger.valueOf(arg[FLASHBACK_QUERY_LIMIT])) if arg.include?(FLASHBACK_QUERY_LIMIT)
   family.setCompressionType(Compression::Algorithm.valueOf(arg[COMPRESSION])) if arg.include?(COMPRESSION)
   family.setBlocksize(JInteger.valueOf(arg[BLOCKSIZE])) if arg.include?(BLOCKSIZE)
   family.setMaxVersions(JInteger.valueOf(arg[VERSIONS])) if arg.include?(VERSIONS)

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java?rev=1395032&r1=1395031&r2=1395032&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java Sat Oct  6 12:50:28 2012
@@ -57,7 +57,8 @@ public class HColumnDescriptor implement
   // Version 7 -- add new compression and hfile blocksize to HColumnDescriptor (HBASE-1217)
   // Version 8 -- reintroduction of bloom filters, changed from boolean to enum
   // Version 9 -- add data block encoding
-  private static final byte COLUMN_DESCRIPTOR_VERSION = (byte) 9;
+  // Version 10 -- add flash back time
+  private static final byte COLUMN_DESCRIPTOR_VERSION = (byte) 10;
 
   // These constants are used as FileInfo keys
   public static final String COMPRESSION = "COMPRESSION";
@@ -87,6 +88,10 @@ public class HColumnDescriptor implement
   public static final String BLOCKSIZE = "BLOCKSIZE";
   public static final String LENGTH = "LENGTH";
   public static final String TTL = "TTL";
+  // The amount of time in seconds in the past up to which we support FlashBack
+  // queries. E.g. 60 * 60 * 24 indicates we support FlashBack queries up to 1
+  // day ago.
+  public static final String FLASHBACK_QUERY_LIMIT = "FLASHBACK_QUERY_LIMIT";
   public static final String BLOOMFILTER = "BLOOMFILTER";
   public static final String BLOOMFILTER_ERRORRATE = "BLOOMFILTER_ERRORRATE";
   public static final String FOREVER = "FOREVER";
@@ -153,6 +158,12 @@ public class HColumnDescriptor implement
   public static final int DEFAULT_TTL = HConstants.FOREVER;
 
   /**
+   * Default flash back time. Flash back time is the number of seconds in the
+   * past up to which we support flash back queries.
+   */
+  public static final int DEFAULT_FLASHBACK_QUERY_LIMIT = 0;
+
+  /**
    * Default scope.
    */
   public static final int DEFAULT_REPLICATION_SCOPE = HConstants.REPLICATION_SCOPE_LOCAL;
@@ -169,6 +180,8 @@ public class HColumnDescriptor implement
       DEFAULT_VALUES.put(HConstants.VERSIONS, String.valueOf(DEFAULT_VERSIONS));
       DEFAULT_VALUES.put(COMPRESSION, DEFAULT_COMPRESSION);
       DEFAULT_VALUES.put(TTL, String.valueOf(DEFAULT_TTL));
+      DEFAULT_VALUES.put(FLASHBACK_QUERY_LIMIT,
+        String.valueOf(DEFAULT_FLASHBACK_QUERY_LIMIT));
       DEFAULT_VALUES.put(BLOCKSIZE, String.valueOf(DEFAULT_BLOCKSIZE));
       DEFAULT_VALUES.put(HConstants.IN_MEMORY, String.valueOf(DEFAULT_IN_MEMORY));
       DEFAULT_VALUES.put(BLOCKCACHE, String.valueOf(DEFAULT_BLOCKCACHE));
@@ -627,6 +640,27 @@ public class HColumnDescriptor implement
   }
 
   /**
+   * @return the time in seconds for how far back in the past we support flash
+   *         back queries.
+   */
+  public int getFlashBackQueryLimit() {
+    String value = getValue(FLASHBACK_QUERY_LIMIT);
+    return (value != null) ? Integer.valueOf(value).intValue()
+        : DEFAULT_FLASHBACK_QUERY_LIMIT;
+  }
+
+  /**
+   * @param flashBackQueryLimit
+   *          the time in seconds for how far back in the past we support flash
+   *          back queries.
+   * @return this (for chained invocation)
+   */
+  public HColumnDescriptor setFlashBackQueryLimit(int flashBackQueryLimit) {
+    return setValue(FLASHBACK_QUERY_LIMIT,
+        Integer.toString(flashBackQueryLimit));
+  }
+
+  /**
    * @return True if MapFile blocks should be cached.
    */
   public boolean isBlockCacheEnabled() {
@@ -715,7 +749,6 @@ public class HColumnDescriptor implement
   }
 
   public String toStringCustomizedValues() {
-    Map<String, String> defaults = getDefaultValues();
     StringBuilder s = new StringBuilder();
     s.append('{');
     s.append(HConstants.NAME);
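
As a quick illustration of the accessors added above (an illustrative
sketch, not part of this change): a family that never had
FLASHBACK_QUERY_LIMIT set falls back to DEFAULT_FLASHBACK_QUERY_LIMIT
(0 seconds), and the setter returns "this", so it composes with other
per-family settings.

  // Assumes the usual imports for HColumnDescriptor and Bytes.
  HColumnDescriptor fresh = new HColumnDescriptor(Bytes.toBytes("cf"));
  int limit = fresh.getFlashBackQueryLimit(); // 0, i.e. DEFAULT_FLASHBACK_QUERY_LIMIT

  // Chained invocation: keep one hour of flash back history.
  HColumnDescriptor tuned = new HColumnDescriptor(Bytes.toBytes("cf"))
      .setFlashBackQueryLimit(60 * 60);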

Modified: hbase/branches/0.89-fb/src/main/ruby/hbase/admin.rb
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/ruby/hbase/admin.rb?rev=1395032&r1=1395031&r2=1395032&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/ruby/hbase/admin.rb (original)
+++ hbase/branches/0.89-fb/src/main/ruby/hbase/admin.rb Sat Oct  6 12:50:28 2012
@@ -467,6 +467,7 @@ module Hbase
       family.setScope(JInteger.valueOf(arg[REPLICATION_SCOPE])) if arg.include?(HColumnDescriptor::REPLICATION_SCOPE)
       family.setInMemory(JBoolean.valueOf(arg[IN_MEMORY])) if arg.include?(HColumnDescriptor::IN_MEMORY)
       family.setTimeToLive(JInteger.valueOf(arg[HColumnDescriptor::TTL])) if arg.include?(HColumnDescriptor::TTL)
+      family.setFlashBackQueryLimit(JInteger.valueOf(arg[HColumnDescriptor::FLASHBACK_QUERY_LIMIT])) if arg.include?(HColumnDescriptor::FLASHBACK_QUERY_LIMIT)
       family.setCompressionType(Compression::Algorithm.valueOf(arg[HColumnDescriptor::COMPRESSION])) if arg.include?(HColumnDescriptor::COMPRESSION)
       family.setDataBlockEncoding(org.apache.hadoop.hbase.io.encoding.DataBlockEncoding.valueOf(arg[org.apache.hadoop.hbase.HColumnDescriptor::DATA_BLOCK_ENCODING])) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::DATA_BLOCK_ENCODING)
       family.setEncodeOnDisk(JBoolean.valueOf(arg[org.apache.hadoop.hbase.HColumnDescriptor::ENCODE_ON_DISK])) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::ENCODE_ON_DISK)

Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java?rev=1395032&r1=1395031&r2=1395032&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java Sat Oct  6 12:50:28 2012
@@ -536,6 +536,29 @@ public class HBaseTestingUtility {
   }
 
   /**
+   * Create a table
+   *
+   * @param tableName
+   * @param columnDescriptors
+   * @return An HTable instance for the created table.
+   * @throws IOException
+   */
+  public HTable createTable(byte[] tableName,
+      HColumnDescriptor[] columnDescriptors) throws IOException {
+    HTableDescriptor desc = new HTableDescriptor(tableName);
+    for (HColumnDescriptor columnDescriptor : columnDescriptors) {
+      desc.addFamily(columnDescriptor);
+    }
+    HBaseAdmin admin = new HBaseAdmin(getConfiguration());
+    try {
+      admin.createTable(desc);
+      return new HTable(getConfiguration(), tableName);
+    } finally {
+      admin.close();
+    }
+  }
+
+  /**
    * Create a table.
    * @param tableName
    * @param family

Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java?rev=1395032&r1=1395031&r2=1395032&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java Sat Oct  6 12:50:28 2012
@@ -129,6 +129,24 @@ public class TestFromClientSide {
     // Nothing to do.
   }
 
+  @Test
+  public void testFlashBackTime() throws Exception {
+    byte[] TABLE = Bytes.toBytes("testFlashBackTime");
+    HColumnDescriptor[] expected = new HColumnDescriptor[10];
+    for (int i = 0; i < expected.length; i++) {
+      byte[] FAMILY = Bytes.toBytes("foo" + i);
+      HColumnDescriptor desc = new HColumnDescriptor(FAMILY);
+      int fbt = random.nextInt();
+      desc.setFlashBackQueryLimit(fbt);
+      expected[i] = desc;
+    }
+    TEST_UTIL.createTable(TABLE, expected);
+    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+    HTableDescriptor tdesc = admin.getTableDescriptor(TABLE);
+    HColumnDescriptor[] actual = tdesc.getColumnFamilies();
+    assertTrue(Arrays.equals(expected, actual));
+  }
+
   /**
    * Verifies that getConfiguration returns the same Configuration object used
    * to create the HTable instance.


