incubator-blur-commits mailing list archives

From amccu...@apache.org
Subject [18/28] Initial commit of the back port. The blur-util and blur-store modules have been completed. A new distribution project also helps with building the project, and all of the pom files have been updated to the new version. This is very much a work i
Date Mon, 18 Mar 2013 01:10:31 GMT
http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/a4601422/src/blur-thrift/src/main/java/org/apache/blur/thrift/generated/TableDescriptor.java
----------------------------------------------------------------------
diff --git a/src/blur-thrift/src/main/java/org/apache/blur/thrift/generated/TableDescriptor.java b/src/blur-thrift/src/main/java/org/apache/blur/thrift/generated/TableDescriptor.java
index d297dbb..816b66f 100644
--- a/src/blur-thrift/src/main/java/org/apache/blur/thrift/generated/TableDescriptor.java
+++ b/src/blur-thrift/src/main/java/org/apache/blur/thrift/generated/TableDescriptor.java
@@ -1,7 +1,8 @@
 /**
- * Autogenerated by Thrift Compiler (0.7.0)
+ * Autogenerated by Thrift Compiler (0.9.0)
  *
  * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
  */
 package org.apache.blur.thrift.generated;
 
@@ -24,6 +25,15 @@ package org.apache.blur.thrift.generated;
 
 
 
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
 import java.util.List;
 import java.util.ArrayList;
 import java.util.Map;
@@ -59,6 +69,12 @@ public class TableDescriptor implements org.apache.thrift.TBase<TableDescriptor,
   private static final org.apache.thrift.protocol.TField READ_ONLY_FIELD_DESC = new org.apache.thrift.protocol.TField("readOnly", org.apache.thrift.protocol.TType.BOOL, (short)12);
   private static final org.apache.thrift.protocol.TField COLUMN_PRE_CACHE_FIELD_DESC = new org.apache.thrift.protocol.TField("columnPreCache", org.apache.thrift.protocol.TType.STRUCT, (short)13);
 
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new TableDescriptorStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new TableDescriptorTupleSchemeFactory());
+  }
+
   /**
    * 
    */
@@ -253,8 +269,7 @@ public class TableDescriptor implements org.apache.thrift.TBase<TableDescriptor,
   private static final int __COMPRESSIONBLOCKSIZE_ISSET_ID = 2;
   private static final int __BLOCKCACHING_ISSET_ID = 3;
   private static final int __READONLY_ISSET_ID = 4;
-  private BitSet __isset_bit_vector = new BitSet(5);
-
+  private byte __isset_bitfield = 0;
   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
   static {
     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -346,8 +361,7 @@ public class TableDescriptor implements org.apache.thrift.TBase<TableDescriptor,
    * Performs a deep copy on <i>other</i>.
    */
   public TableDescriptor(TableDescriptor other) {
-    __isset_bit_vector.clear();
-    __isset_bit_vector.or(other.__isset_bit_vector);
+    __isset_bitfield = other.__isset_bitfield;
     this.isEnabled = other.isEnabled;
     if (other.isSetAnalyzerDefinition()) {
       this.analyzerDefinition = new AnalyzerDefinition(other.analyzerDefinition);
@@ -428,16 +442,16 @@ public class TableDescriptor implements org.apache.thrift.TBase<TableDescriptor,
   }
 
   public void unsetIsEnabled() {
-    __isset_bit_vector.clear(__ISENABLED_ISSET_ID);
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ISENABLED_ISSET_ID);
   }
 
   /** Returns true if field isEnabled is set (has been assigned a value) and false otherwise */
   public boolean isSetIsEnabled() {
-    return __isset_bit_vector.get(__ISENABLED_ISSET_ID);
+    return EncodingUtils.testBit(__isset_bitfield, __ISENABLED_ISSET_ID);
   }
 
   public void setIsEnabledIsSet(boolean value) {
-    __isset_bit_vector.set(__ISENABLED_ISSET_ID, value);
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ISENABLED_ISSET_ID, value);
   }
 
   /**
@@ -487,16 +501,16 @@ public class TableDescriptor implements org.apache.thrift.TBase<TableDescriptor,
   }
 
   public void unsetShardCount() {
-    __isset_bit_vector.clear(__SHARDCOUNT_ISSET_ID);
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __SHARDCOUNT_ISSET_ID);
   }
 
   /** Returns true if field shardCount is set (has been assigned a value) and false otherwise */
   public boolean isSetShardCount() {
-    return __isset_bit_vector.get(__SHARDCOUNT_ISSET_ID);
+    return EncodingUtils.testBit(__isset_bitfield, __SHARDCOUNT_ISSET_ID);
   }
 
   public void setShardCountIsSet(boolean value) {
-    __isset_bit_vector.set(__SHARDCOUNT_ISSET_ID, value);
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __SHARDCOUNT_ISSET_ID, value);
   }
 
   /**
@@ -576,16 +590,16 @@ public class TableDescriptor implements org.apache.thrift.TBase<TableDescriptor,
   }
 
   public void unsetCompressionBlockSize() {
-    __isset_bit_vector.clear(__COMPRESSIONBLOCKSIZE_ISSET_ID);
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __COMPRESSIONBLOCKSIZE_ISSET_ID);
   }
 
   /** Returns true if field compressionBlockSize is set (has been assigned a value) and false otherwise */
   public boolean isSetCompressionBlockSize() {
-    return __isset_bit_vector.get(__COMPRESSIONBLOCKSIZE_ISSET_ID);
+    return EncodingUtils.testBit(__isset_bitfield, __COMPRESSIONBLOCKSIZE_ISSET_ID);
   }
 
   public void setCompressionBlockSizeIsSet(boolean value) {
-    __isset_bit_vector.set(__COMPRESSIONBLOCKSIZE_ISSET_ID, value);
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __COMPRESSIONBLOCKSIZE_ISSET_ID, value);
   }
 
   /**
@@ -695,16 +709,16 @@ public class TableDescriptor implements org.apache.thrift.TBase<TableDescriptor,
   }
 
   public void unsetBlockCaching() {
-    __isset_bit_vector.clear(__BLOCKCACHING_ISSET_ID);
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __BLOCKCACHING_ISSET_ID);
   }
 
   /** Returns true if field blockCaching is set (has been assigned a value) and false otherwise */
   public boolean isSetBlockCaching() {
-    return __isset_bit_vector.get(__BLOCKCACHING_ISSET_ID);
+    return EncodingUtils.testBit(__isset_bitfield, __BLOCKCACHING_ISSET_ID);
   }
 
   public void setBlockCachingIsSet(boolean value) {
-    __isset_bit_vector.set(__BLOCKCACHING_ISSET_ID, value);
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __BLOCKCACHING_ISSET_ID, value);
   }
 
   public int getBlockCachingFileTypesSize() {
@@ -771,16 +785,16 @@ public class TableDescriptor implements org.apache.thrift.TBase<TableDescriptor,
   }
 
   public void unsetReadOnly() {
-    __isset_bit_vector.clear(__READONLY_ISSET_ID);
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __READONLY_ISSET_ID);
   }
 
   /** Returns true if field readOnly is set (has been assigned a value) and false otherwise */
   public boolean isSetReadOnly() {
-    return __isset_bit_vector.get(__READONLY_ISSET_ID);
+    return EncodingUtils.testBit(__isset_bitfield, __READONLY_ISSET_ID);
   }
 
   public void setReadOnlyIsSet(boolean value) {
-    __isset_bit_vector.set(__READONLY_ISSET_ID, value);
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __READONLY_ISSET_ID, value);
   }
 
   /**
@@ -1288,202 +1302,11 @@ public class TableDescriptor implements org.apache.thrift.TBase<TableDescriptor,
   }
 
   public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
-    org.apache.thrift.protocol.TField field;
-    iprot.readStructBegin();
-    while (true)
-    {
-      field = iprot.readFieldBegin();
-      if (field.type == org.apache.thrift.protocol.TType.STOP) { 
-        break;
-      }
-      switch (field.id) {
-        case 1: // IS_ENABLED
-          if (field.type == org.apache.thrift.protocol.TType.BOOL) {
-            this.isEnabled = iprot.readBool();
-            setIsEnabledIsSet(true);
-          } else { 
-            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
-          }
-          break;
-        case 2: // ANALYZER_DEFINITION
-          if (field.type == org.apache.thrift.protocol.TType.STRUCT) {
-            this.analyzerDefinition = new AnalyzerDefinition();
-            this.analyzerDefinition.read(iprot);
-          } else { 
-            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
-          }
-          break;
-        case 3: // SHARD_COUNT
-          if (field.type == org.apache.thrift.protocol.TType.I32) {
-            this.shardCount = iprot.readI32();
-            setShardCountIsSet(true);
-          } else { 
-            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
-          }
-          break;
-        case 4: // TABLE_URI
-          if (field.type == org.apache.thrift.protocol.TType.STRING) {
-            this.tableUri = iprot.readString();
-          } else { 
-            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
-          }
-          break;
-        case 5: // COMPRESSION_CLASS
-          if (field.type == org.apache.thrift.protocol.TType.STRING) {
-            this.compressionClass = iprot.readString();
-          } else { 
-            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
-          }
-          break;
-        case 6: // COMPRESSION_BLOCK_SIZE
-          if (field.type == org.apache.thrift.protocol.TType.I32) {
-            this.compressionBlockSize = iprot.readI32();
-            setCompressionBlockSizeIsSet(true);
-          } else { 
-            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
-          }
-          break;
-        case 7: // CLUSTER
-          if (field.type == org.apache.thrift.protocol.TType.STRING) {
-            this.cluster = iprot.readString();
-          } else { 
-            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
-          }
-          break;
-        case 8: // NAME
-          if (field.type == org.apache.thrift.protocol.TType.STRING) {
-            this.name = iprot.readString();
-          } else { 
-            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
-          }
-          break;
-        case 9: // SIMILARITY_CLASS
-          if (field.type == org.apache.thrift.protocol.TType.STRING) {
-            this.similarityClass = iprot.readString();
-          } else { 
-            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
-          }
-          break;
-        case 10: // BLOCK_CACHING
-          if (field.type == org.apache.thrift.protocol.TType.BOOL) {
-            this.blockCaching = iprot.readBool();
-            setBlockCachingIsSet(true);
-          } else { 
-            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
-          }
-          break;
-        case 11: // BLOCK_CACHING_FILE_TYPES
-          if (field.type == org.apache.thrift.protocol.TType.SET) {
-            {
-              org.apache.thrift.protocol.TSet _set79 = iprot.readSetBegin();
-              this.blockCachingFileTypes = new HashSet<String>(2*_set79.size);
-              for (int _i80 = 0; _i80 < _set79.size; ++_i80)
-              {
-                String _elem81; // required
-                _elem81 = iprot.readString();
-                this.blockCachingFileTypes.add(_elem81);
-              }
-              iprot.readSetEnd();
-            }
-          } else { 
-            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
-          }
-          break;
-        case 12: // READ_ONLY
-          if (field.type == org.apache.thrift.protocol.TType.BOOL) {
-            this.readOnly = iprot.readBool();
-            setReadOnlyIsSet(true);
-          } else { 
-            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
-          }
-          break;
-        case 13: // COLUMN_PRE_CACHE
-          if (field.type == org.apache.thrift.protocol.TType.STRUCT) {
-            this.columnPreCache = new ColumnPreCache();
-            this.columnPreCache.read(iprot);
-          } else { 
-            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
-          }
-          break;
-        default:
-          org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
-      }
-      iprot.readFieldEnd();
-    }
-    iprot.readStructEnd();
-
-    // check for required fields of primitive type, which can't be checked in the validate method
-    validate();
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
   }
 
   public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
-    validate();
-
-    oprot.writeStructBegin(STRUCT_DESC);
-    oprot.writeFieldBegin(IS_ENABLED_FIELD_DESC);
-    oprot.writeBool(this.isEnabled);
-    oprot.writeFieldEnd();
-    if (this.analyzerDefinition != null) {
-      oprot.writeFieldBegin(ANALYZER_DEFINITION_FIELD_DESC);
-      this.analyzerDefinition.write(oprot);
-      oprot.writeFieldEnd();
-    }
-    oprot.writeFieldBegin(SHARD_COUNT_FIELD_DESC);
-    oprot.writeI32(this.shardCount);
-    oprot.writeFieldEnd();
-    if (this.tableUri != null) {
-      oprot.writeFieldBegin(TABLE_URI_FIELD_DESC);
-      oprot.writeString(this.tableUri);
-      oprot.writeFieldEnd();
-    }
-    if (this.compressionClass != null) {
-      oprot.writeFieldBegin(COMPRESSION_CLASS_FIELD_DESC);
-      oprot.writeString(this.compressionClass);
-      oprot.writeFieldEnd();
-    }
-    oprot.writeFieldBegin(COMPRESSION_BLOCK_SIZE_FIELD_DESC);
-    oprot.writeI32(this.compressionBlockSize);
-    oprot.writeFieldEnd();
-    if (this.cluster != null) {
-      oprot.writeFieldBegin(CLUSTER_FIELD_DESC);
-      oprot.writeString(this.cluster);
-      oprot.writeFieldEnd();
-    }
-    if (this.name != null) {
-      oprot.writeFieldBegin(NAME_FIELD_DESC);
-      oprot.writeString(this.name);
-      oprot.writeFieldEnd();
-    }
-    if (this.similarityClass != null) {
-      oprot.writeFieldBegin(SIMILARITY_CLASS_FIELD_DESC);
-      oprot.writeString(this.similarityClass);
-      oprot.writeFieldEnd();
-    }
-    oprot.writeFieldBegin(BLOCK_CACHING_FIELD_DESC);
-    oprot.writeBool(this.blockCaching);
-    oprot.writeFieldEnd();
-    if (this.blockCachingFileTypes != null) {
-      oprot.writeFieldBegin(BLOCK_CACHING_FILE_TYPES_FIELD_DESC);
-      {
-        oprot.writeSetBegin(new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.STRING, this.blockCachingFileTypes.size()));
-        for (String _iter82 : this.blockCachingFileTypes)
-        {
-          oprot.writeString(_iter82);
-        }
-        oprot.writeSetEnd();
-      }
-      oprot.writeFieldEnd();
-    }
-    oprot.writeFieldBegin(READ_ONLY_FIELD_DESC);
-    oprot.writeBool(this.readOnly);
-    oprot.writeFieldEnd();
-    if (this.columnPreCache != null) {
-      oprot.writeFieldBegin(COLUMN_PRE_CACHE_FIELD_DESC);
-      this.columnPreCache.write(oprot);
-      oprot.writeFieldEnd();
-    }
-    oprot.writeFieldStop();
-    oprot.writeStructEnd();
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
   }
 
   @Override
@@ -1580,6 +1403,13 @@ public class TableDescriptor implements org.apache.thrift.TBase<TableDescriptor,
 
   public void validate() throws org.apache.thrift.TException {
     // check for required fields
+    // check for sub-struct validity
+    if (analyzerDefinition != null) {
+      analyzerDefinition.validate();
+    }
+    if (columnPreCache != null) {
+      columnPreCache.validate();
+    }
   }
 
   private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
@@ -1593,12 +1423,398 @@ public class TableDescriptor implements org.apache.thrift.TBase<TableDescriptor,
   private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
     try {
       // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
-      __isset_bit_vector = new BitSet(1);
+      __isset_bitfield = 0;
       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
     } catch (org.apache.thrift.TException te) {
       throw new java.io.IOException(te);
     }
   }
 
+  private static class TableDescriptorStandardSchemeFactory implements SchemeFactory {
+    public TableDescriptorStandardScheme getScheme() {
+      return new TableDescriptorStandardScheme();
+    }
+  }
+
+  private static class TableDescriptorStandardScheme extends StandardScheme<TableDescriptor> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, TableDescriptor struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // IS_ENABLED
+            if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
+              struct.isEnabled = iprot.readBool();
+              struct.setIsEnabledIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // ANALYZER_DEFINITION
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+              struct.analyzerDefinition = new AnalyzerDefinition();
+              struct.analyzerDefinition.read(iprot);
+              struct.setAnalyzerDefinitionIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // SHARD_COUNT
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.shardCount = iprot.readI32();
+              struct.setShardCountIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 4: // TABLE_URI
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.tableUri = iprot.readString();
+              struct.setTableUriIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 5: // COMPRESSION_CLASS
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.compressionClass = iprot.readString();
+              struct.setCompressionClassIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 6: // COMPRESSION_BLOCK_SIZE
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.compressionBlockSize = iprot.readI32();
+              struct.setCompressionBlockSizeIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 7: // CLUSTER
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.cluster = iprot.readString();
+              struct.setClusterIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 8: // NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.name = iprot.readString();
+              struct.setNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 9: // SIMILARITY_CLASS
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.similarityClass = iprot.readString();
+              struct.setSimilarityClassIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 10: // BLOCK_CACHING
+            if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
+              struct.blockCaching = iprot.readBool();
+              struct.setBlockCachingIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 11: // BLOCK_CACHING_FILE_TYPES
+            if (schemeField.type == org.apache.thrift.protocol.TType.SET) {
+              {
+                org.apache.thrift.protocol.TSet _set158 = iprot.readSetBegin();
+                struct.blockCachingFileTypes = new HashSet<String>(2*_set158.size);
+                for (int _i159 = 0; _i159 < _set158.size; ++_i159)
+                {
+                  String _elem160; // required
+                  _elem160 = iprot.readString();
+                  struct.blockCachingFileTypes.add(_elem160);
+                }
+                iprot.readSetEnd();
+              }
+              struct.setBlockCachingFileTypesIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 12: // READ_ONLY
+            if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
+              struct.readOnly = iprot.readBool();
+              struct.setReadOnlyIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 13: // COLUMN_PRE_CACHE
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+              struct.columnPreCache = new ColumnPreCache();
+              struct.columnPreCache.read(iprot);
+              struct.setColumnPreCacheIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+
+      // check for required fields of primitive type, which can't be checked in the validate method
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, TableDescriptor struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      oprot.writeFieldBegin(IS_ENABLED_FIELD_DESC);
+      oprot.writeBool(struct.isEnabled);
+      oprot.writeFieldEnd();
+      if (struct.analyzerDefinition != null) {
+        oprot.writeFieldBegin(ANALYZER_DEFINITION_FIELD_DESC);
+        struct.analyzerDefinition.write(oprot);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldBegin(SHARD_COUNT_FIELD_DESC);
+      oprot.writeI32(struct.shardCount);
+      oprot.writeFieldEnd();
+      if (struct.tableUri != null) {
+        oprot.writeFieldBegin(TABLE_URI_FIELD_DESC);
+        oprot.writeString(struct.tableUri);
+        oprot.writeFieldEnd();
+      }
+      if (struct.compressionClass != null) {
+        oprot.writeFieldBegin(COMPRESSION_CLASS_FIELD_DESC);
+        oprot.writeString(struct.compressionClass);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldBegin(COMPRESSION_BLOCK_SIZE_FIELD_DESC);
+      oprot.writeI32(struct.compressionBlockSize);
+      oprot.writeFieldEnd();
+      if (struct.cluster != null) {
+        oprot.writeFieldBegin(CLUSTER_FIELD_DESC);
+        oprot.writeString(struct.cluster);
+        oprot.writeFieldEnd();
+      }
+      if (struct.name != null) {
+        oprot.writeFieldBegin(NAME_FIELD_DESC);
+        oprot.writeString(struct.name);
+        oprot.writeFieldEnd();
+      }
+      if (struct.similarityClass != null) {
+        oprot.writeFieldBegin(SIMILARITY_CLASS_FIELD_DESC);
+        oprot.writeString(struct.similarityClass);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldBegin(BLOCK_CACHING_FIELD_DESC);
+      oprot.writeBool(struct.blockCaching);
+      oprot.writeFieldEnd();
+      if (struct.blockCachingFileTypes != null) {
+        oprot.writeFieldBegin(BLOCK_CACHING_FILE_TYPES_FIELD_DESC);
+        {
+          oprot.writeSetBegin(new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.STRING, struct.blockCachingFileTypes.size()));
+          for (String _iter161 : struct.blockCachingFileTypes)
+          {
+            oprot.writeString(_iter161);
+          }
+          oprot.writeSetEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldBegin(READ_ONLY_FIELD_DESC);
+      oprot.writeBool(struct.readOnly);
+      oprot.writeFieldEnd();
+      if (struct.columnPreCache != null) {
+        oprot.writeFieldBegin(COLUMN_PRE_CACHE_FIELD_DESC);
+        struct.columnPreCache.write(oprot);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class TableDescriptorTupleSchemeFactory implements SchemeFactory {
+    public TableDescriptorTupleScheme getScheme() {
+      return new TableDescriptorTupleScheme();
+    }
+  }
+
+  private static class TableDescriptorTupleScheme extends TupleScheme<TableDescriptor> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, TableDescriptor struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetIsEnabled()) {
+        optionals.set(0);
+      }
+      if (struct.isSetAnalyzerDefinition()) {
+        optionals.set(1);
+      }
+      if (struct.isSetShardCount()) {
+        optionals.set(2);
+      }
+      if (struct.isSetTableUri()) {
+        optionals.set(3);
+      }
+      if (struct.isSetCompressionClass()) {
+        optionals.set(4);
+      }
+      if (struct.isSetCompressionBlockSize()) {
+        optionals.set(5);
+      }
+      if (struct.isSetCluster()) {
+        optionals.set(6);
+      }
+      if (struct.isSetName()) {
+        optionals.set(7);
+      }
+      if (struct.isSetSimilarityClass()) {
+        optionals.set(8);
+      }
+      if (struct.isSetBlockCaching()) {
+        optionals.set(9);
+      }
+      if (struct.isSetBlockCachingFileTypes()) {
+        optionals.set(10);
+      }
+      if (struct.isSetReadOnly()) {
+        optionals.set(11);
+      }
+      if (struct.isSetColumnPreCache()) {
+        optionals.set(12);
+      }
+      oprot.writeBitSet(optionals, 13);
+      if (struct.isSetIsEnabled()) {
+        oprot.writeBool(struct.isEnabled);
+      }
+      if (struct.isSetAnalyzerDefinition()) {
+        struct.analyzerDefinition.write(oprot);
+      }
+      if (struct.isSetShardCount()) {
+        oprot.writeI32(struct.shardCount);
+      }
+      if (struct.isSetTableUri()) {
+        oprot.writeString(struct.tableUri);
+      }
+      if (struct.isSetCompressionClass()) {
+        oprot.writeString(struct.compressionClass);
+      }
+      if (struct.isSetCompressionBlockSize()) {
+        oprot.writeI32(struct.compressionBlockSize);
+      }
+      if (struct.isSetCluster()) {
+        oprot.writeString(struct.cluster);
+      }
+      if (struct.isSetName()) {
+        oprot.writeString(struct.name);
+      }
+      if (struct.isSetSimilarityClass()) {
+        oprot.writeString(struct.similarityClass);
+      }
+      if (struct.isSetBlockCaching()) {
+        oprot.writeBool(struct.blockCaching);
+      }
+      if (struct.isSetBlockCachingFileTypes()) {
+        {
+          oprot.writeI32(struct.blockCachingFileTypes.size());
+          for (String _iter162 : struct.blockCachingFileTypes)
+          {
+            oprot.writeString(_iter162);
+          }
+        }
+      }
+      if (struct.isSetReadOnly()) {
+        oprot.writeBool(struct.readOnly);
+      }
+      if (struct.isSetColumnPreCache()) {
+        struct.columnPreCache.write(oprot);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, TableDescriptor struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(13);
+      if (incoming.get(0)) {
+        struct.isEnabled = iprot.readBool();
+        struct.setIsEnabledIsSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.analyzerDefinition = new AnalyzerDefinition();
+        struct.analyzerDefinition.read(iprot);
+        struct.setAnalyzerDefinitionIsSet(true);
+      }
+      if (incoming.get(2)) {
+        struct.shardCount = iprot.readI32();
+        struct.setShardCountIsSet(true);
+      }
+      if (incoming.get(3)) {
+        struct.tableUri = iprot.readString();
+        struct.setTableUriIsSet(true);
+      }
+      if (incoming.get(4)) {
+        struct.compressionClass = iprot.readString();
+        struct.setCompressionClassIsSet(true);
+      }
+      if (incoming.get(5)) {
+        struct.compressionBlockSize = iprot.readI32();
+        struct.setCompressionBlockSizeIsSet(true);
+      }
+      if (incoming.get(6)) {
+        struct.cluster = iprot.readString();
+        struct.setClusterIsSet(true);
+      }
+      if (incoming.get(7)) {
+        struct.name = iprot.readString();
+        struct.setNameIsSet(true);
+      }
+      if (incoming.get(8)) {
+        struct.similarityClass = iprot.readString();
+        struct.setSimilarityClassIsSet(true);
+      }
+      if (incoming.get(9)) {
+        struct.blockCaching = iprot.readBool();
+        struct.setBlockCachingIsSet(true);
+      }
+      if (incoming.get(10)) {
+        {
+          org.apache.thrift.protocol.TSet _set163 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+          struct.blockCachingFileTypes = new HashSet<String>(2*_set163.size);
+          for (int _i164 = 0; _i164 < _set163.size; ++_i164)
+          {
+            String _elem165; // required
+            _elem165 = iprot.readString();
+            struct.blockCachingFileTypes.add(_elem165);
+          }
+        }
+        struct.setBlockCachingFileTypesIsSet(true);
+      }
+      if (incoming.get(11)) {
+        struct.readOnly = iprot.readBool();
+        struct.setReadOnlyIsSet(true);
+      }
+      if (incoming.get(12)) {
+        struct.columnPreCache = new ColumnPreCache();
+        struct.columnPreCache.read(iprot);
+        struct.setColumnPreCacheIsSet(true);
+      }
+    }
+  }
+
 }
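
Editor's note: the regenerated TableDescriptor above now delegates read() and write() to the Thrift 0.9.0 scheme machinery — the protocol reports which scheme it supports, and the struct dispatches to the generated StandardScheme or TupleScheme accordingly. A rough sketch of how calling code exercises this path (illustrative only; it uses the stock Thrift TSerializer/TDeserializer helpers and the usual generated setters, with made-up field values):

import org.apache.blur.thrift.generated.TableDescriptor;
import org.apache.thrift.TDeserializer;
import org.apache.thrift.TException;
import org.apache.thrift.TSerializer;
import org.apache.thrift.protocol.TBinaryProtocol;

public class TableDescriptorRoundTrip {
  public static void main(String[] args) throws TException {
    TableDescriptor descriptor = new TableDescriptor();
    descriptor.setName("example_table");          // hypothetical values
    descriptor.setShardCount(11);
    descriptor.setTableUri("hdfs://host/path");   // hypothetical URI

    // TBinaryProtocol reports the standard scheme, so write() above
    // dispatches to TableDescriptorStandardScheme; only TTupleProtocol
    // selects the tuple scheme.
    TSerializer serializer = new TSerializer(new TBinaryProtocol.Factory());
    byte[] bytes = serializer.serialize(descriptor);

    TDeserializer deserializer = new TDeserializer(new TBinaryProtocol.Factory());
    TableDescriptor copy = new TableDescriptor();
    deserializer.deserialize(copy, bytes);
    System.out.println(copy.equals(descriptor));  // true for a clean round trip
  }
}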
 

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/a4601422/src/blur-thrift/src/main/java/org/apache/blur/thrift/generated/TableStats.java
----------------------------------------------------------------------
diff --git a/src/blur-thrift/src/main/java/org/apache/blur/thrift/generated/TableStats.java b/src/blur-thrift/src/main/java/org/apache/blur/thrift/generated/TableStats.java
index f4bae2d..dca3f1d 100644
--- a/src/blur-thrift/src/main/java/org/apache/blur/thrift/generated/TableStats.java
+++ b/src/blur-thrift/src/main/java/org/apache/blur/thrift/generated/TableStats.java
@@ -1,7 +1,8 @@
 /**
- * Autogenerated by Thrift Compiler (0.7.0)
+ * Autogenerated by Thrift Compiler (0.9.0)
  *
  * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
  */
 package org.apache.blur.thrift.generated;
 
@@ -24,6 +25,15 @@ package org.apache.blur.thrift.generated;
 
 
 
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
 import java.util.List;
 import java.util.ArrayList;
 import java.util.Map;
@@ -51,6 +61,12 @@ public class TableStats implements org.apache.thrift.TBase<TableStats, TableStat
   private static final org.apache.thrift.protocol.TField ROW_COUNT_FIELD_DESC = new org.apache.thrift.protocol.TField("rowCount", org.apache.thrift.protocol.TType.I64, (short)4);
   private static final org.apache.thrift.protocol.TField QUERIES_FIELD_DESC = new org.apache.thrift.protocol.TField("queries", org.apache.thrift.protocol.TType.I64, (short)5);
 
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new TableStatsStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new TableStatsTupleSchemeFactory());
+  }
+
   /**
    * 
    */
@@ -162,8 +178,7 @@ public class TableStats implements org.apache.thrift.TBase<TableStats, TableStat
   private static final int __RECORDCOUNT_ISSET_ID = 1;
   private static final int __ROWCOUNT_ISSET_ID = 2;
   private static final int __QUERIES_ISSET_ID = 3;
-  private BitSet __isset_bit_vector = new BitSet(4);
-
+  private byte __isset_bitfield = 0;
   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
   static {
     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -207,8 +222,7 @@ public class TableStats implements org.apache.thrift.TBase<TableStats, TableStat
    * Performs a deep copy on <i>other</i>.
    */
   public TableStats(TableStats other) {
-    __isset_bit_vector.clear();
-    __isset_bit_vector.or(other.__isset_bit_vector);
+    __isset_bitfield = other.__isset_bitfield;
     if (other.isSetTableName()) {
       this.tableName = other.tableName;
     }
@@ -282,16 +296,16 @@ public class TableStats implements org.apache.thrift.TBase<TableStats, TableStat
   }
 
   public void unsetBytes() {
-    __isset_bit_vector.clear(__BYTES_ISSET_ID);
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __BYTES_ISSET_ID);
   }
 
   /** Returns true if field bytes is set (has been assigned a value) and false otherwise */
   public boolean isSetBytes() {
-    return __isset_bit_vector.get(__BYTES_ISSET_ID);
+    return EncodingUtils.testBit(__isset_bitfield, __BYTES_ISSET_ID);
   }
 
   public void setBytesIsSet(boolean value) {
-    __isset_bit_vector.set(__BYTES_ISSET_ID, value);
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __BYTES_ISSET_ID, value);
   }
 
   /**
@@ -311,16 +325,16 @@ public class TableStats implements org.apache.thrift.TBase<TableStats, TableStat
   }
 
   public void unsetRecordCount() {
-    __isset_bit_vector.clear(__RECORDCOUNT_ISSET_ID);
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __RECORDCOUNT_ISSET_ID);
   }
 
   /** Returns true if field recordCount is set (has been assigned a value) and false otherwise */
   public boolean isSetRecordCount() {
-    return __isset_bit_vector.get(__RECORDCOUNT_ISSET_ID);
+    return EncodingUtils.testBit(__isset_bitfield, __RECORDCOUNT_ISSET_ID);
   }
 
   public void setRecordCountIsSet(boolean value) {
-    __isset_bit_vector.set(__RECORDCOUNT_ISSET_ID, value);
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __RECORDCOUNT_ISSET_ID, value);
   }
 
   /**
@@ -340,16 +354,16 @@ public class TableStats implements org.apache.thrift.TBase<TableStats, TableStat
   }
 
   public void unsetRowCount() {
-    __isset_bit_vector.clear(__ROWCOUNT_ISSET_ID);
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ROWCOUNT_ISSET_ID);
   }
 
   /** Returns true if field rowCount is set (has been assigned a value) and false otherwise */
   public boolean isSetRowCount() {
-    return __isset_bit_vector.get(__ROWCOUNT_ISSET_ID);
+    return EncodingUtils.testBit(__isset_bitfield, __ROWCOUNT_ISSET_ID);
   }
 
   public void setRowCountIsSet(boolean value) {
-    __isset_bit_vector.set(__ROWCOUNT_ISSET_ID, value);
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ROWCOUNT_ISSET_ID, value);
   }
 
   /**
@@ -369,16 +383,16 @@ public class TableStats implements org.apache.thrift.TBase<TableStats, TableStat
   }
 
   public void unsetQueries() {
-    __isset_bit_vector.clear(__QUERIES_ISSET_ID);
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __QUERIES_ISSET_ID);
   }
 
   /** Returns true if field queries is set (has been assigned a value) and false otherwise */
   public boolean isSetQueries() {
-    return __isset_bit_vector.get(__QUERIES_ISSET_ID);
+    return EncodingUtils.testBit(__isset_bitfield, __QUERIES_ISSET_ID);
   }
 
   public void setQueriesIsSet(boolean value) {
-    __isset_bit_vector.set(__QUERIES_ISSET_ID, value);
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __QUERIES_ISSET_ID, value);
   }
 
   public void setFieldValue(_Fields field, Object value) {
@@ -600,88 +614,11 @@ public class TableStats implements org.apache.thrift.TBase<TableStats, TableStat
   }
 
   public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
-    org.apache.thrift.protocol.TField field;
-    iprot.readStructBegin();
-    while (true)
-    {
-      field = iprot.readFieldBegin();
-      if (field.type == org.apache.thrift.protocol.TType.STOP) { 
-        break;
-      }
-      switch (field.id) {
-        case 1: // TABLE_NAME
-          if (field.type == org.apache.thrift.protocol.TType.STRING) {
-            this.tableName = iprot.readString();
-          } else { 
-            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
-          }
-          break;
-        case 2: // BYTES
-          if (field.type == org.apache.thrift.protocol.TType.I64) {
-            this.bytes = iprot.readI64();
-            setBytesIsSet(true);
-          } else { 
-            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
-          }
-          break;
-        case 3: // RECORD_COUNT
-          if (field.type == org.apache.thrift.protocol.TType.I64) {
-            this.recordCount = iprot.readI64();
-            setRecordCountIsSet(true);
-          } else { 
-            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
-          }
-          break;
-        case 4: // ROW_COUNT
-          if (field.type == org.apache.thrift.protocol.TType.I64) {
-            this.rowCount = iprot.readI64();
-            setRowCountIsSet(true);
-          } else { 
-            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
-          }
-          break;
-        case 5: // QUERIES
-          if (field.type == org.apache.thrift.protocol.TType.I64) {
-            this.queries = iprot.readI64();
-            setQueriesIsSet(true);
-          } else { 
-            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
-          }
-          break;
-        default:
-          org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
-      }
-      iprot.readFieldEnd();
-    }
-    iprot.readStructEnd();
-
-    // check for required fields of primitive type, which can't be checked in the validate method
-    validate();
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
   }
 
   public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
-    validate();
-
-    oprot.writeStructBegin(STRUCT_DESC);
-    if (this.tableName != null) {
-      oprot.writeFieldBegin(TABLE_NAME_FIELD_DESC);
-      oprot.writeString(this.tableName);
-      oprot.writeFieldEnd();
-    }
-    oprot.writeFieldBegin(BYTES_FIELD_DESC);
-    oprot.writeI64(this.bytes);
-    oprot.writeFieldEnd();
-    oprot.writeFieldBegin(RECORD_COUNT_FIELD_DESC);
-    oprot.writeI64(this.recordCount);
-    oprot.writeFieldEnd();
-    oprot.writeFieldBegin(ROW_COUNT_FIELD_DESC);
-    oprot.writeI64(this.rowCount);
-    oprot.writeFieldEnd();
-    oprot.writeFieldBegin(QUERIES_FIELD_DESC);
-    oprot.writeI64(this.queries);
-    oprot.writeFieldEnd();
-    oprot.writeFieldStop();
-    oprot.writeStructEnd();
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
   }
 
   @Override
@@ -718,6 +655,7 @@ public class TableStats implements org.apache.thrift.TBase<TableStats, TableStat
 
   public void validate() throws org.apache.thrift.TException {
     // check for required fields
+    // check for sub-struct validity
   }
 
   private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
@@ -731,12 +669,180 @@ public class TableStats implements org.apache.thrift.TBase<TableStats, TableStat
   private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
     try {
       // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
-      __isset_bit_vector = new BitSet(1);
+      __isset_bitfield = 0;
       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
     } catch (org.apache.thrift.TException te) {
       throw new java.io.IOException(te);
     }
   }
 
+  private static class TableStatsStandardSchemeFactory implements SchemeFactory {
+    public TableStatsStandardScheme getScheme() {
+      return new TableStatsStandardScheme();
+    }
+  }
+
+  private static class TableStatsStandardScheme extends StandardScheme<TableStats> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, TableStats struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // TABLE_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.tableName = iprot.readString();
+              struct.setTableNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // BYTES
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.bytes = iprot.readI64();
+              struct.setBytesIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // RECORD_COUNT
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.recordCount = iprot.readI64();
+              struct.setRecordCountIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 4: // ROW_COUNT
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.rowCount = iprot.readI64();
+              struct.setRowCountIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 5: // QUERIES
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.queries = iprot.readI64();
+              struct.setQueriesIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+
+      // check for required fields of primitive type, which can't be checked in the validate method
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, TableStats struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.tableName != null) {
+        oprot.writeFieldBegin(TABLE_NAME_FIELD_DESC);
+        oprot.writeString(struct.tableName);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldBegin(BYTES_FIELD_DESC);
+      oprot.writeI64(struct.bytes);
+      oprot.writeFieldEnd();
+      oprot.writeFieldBegin(RECORD_COUNT_FIELD_DESC);
+      oprot.writeI64(struct.recordCount);
+      oprot.writeFieldEnd();
+      oprot.writeFieldBegin(ROW_COUNT_FIELD_DESC);
+      oprot.writeI64(struct.rowCount);
+      oprot.writeFieldEnd();
+      oprot.writeFieldBegin(QUERIES_FIELD_DESC);
+      oprot.writeI64(struct.queries);
+      oprot.writeFieldEnd();
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class TableStatsTupleSchemeFactory implements SchemeFactory {
+    public TableStatsTupleScheme getScheme() {
+      return new TableStatsTupleScheme();
+    }
+  }
+
+  private static class TableStatsTupleScheme extends TupleScheme<TableStats> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, TableStats struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetTableName()) {
+        optionals.set(0);
+      }
+      if (struct.isSetBytes()) {
+        optionals.set(1);
+      }
+      if (struct.isSetRecordCount()) {
+        optionals.set(2);
+      }
+      if (struct.isSetRowCount()) {
+        optionals.set(3);
+      }
+      if (struct.isSetQueries()) {
+        optionals.set(4);
+      }
+      oprot.writeBitSet(optionals, 5);
+      if (struct.isSetTableName()) {
+        oprot.writeString(struct.tableName);
+      }
+      if (struct.isSetBytes()) {
+        oprot.writeI64(struct.bytes);
+      }
+      if (struct.isSetRecordCount()) {
+        oprot.writeI64(struct.recordCount);
+      }
+      if (struct.isSetRowCount()) {
+        oprot.writeI64(struct.rowCount);
+      }
+      if (struct.isSetQueries()) {
+        oprot.writeI64(struct.queries);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, TableStats struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(5);
+      if (incoming.get(0)) {
+        struct.tableName = iprot.readString();
+        struct.setTableNameIsSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.bytes = iprot.readI64();
+        struct.setBytesIsSet(true);
+      }
+      if (incoming.get(2)) {
+        struct.recordCount = iprot.readI64();
+        struct.setRecordCountIsSet(true);
+      }
+      if (incoming.get(3)) {
+        struct.rowCount = iprot.readI64();
+        struct.setRowCountIsSet(true);
+      }
+      if (incoming.get(4)) {
+        struct.queries = iprot.readI64();
+        struct.setQueriesIsSet(true);
+      }
+    }
+  }
+
 }
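
Editor's note: the other recurring change in these regenerated structs is replacing the java.util.BitSet isset tracker with a single byte bitfield manipulated through org.apache.thrift.EncodingUtils. A minimal sketch of what those helper calls do (illustrative only; the generated methods above are the real usage):

import org.apache.thrift.EncodingUtils;

public class IssetBitfieldDemo {
  public static void main(String[] args) {
    byte issetBitfield = 0;
    final int BYTES_ISSET_ID = 0; // mirrors __BYTES_ISSET_ID in TableStats

    // setBytesIsSet(true) becomes:
    issetBitfield = EncodingUtils.setBit(issetBitfield, BYTES_ISSET_ID, true);
    System.out.println(EncodingUtils.testBit(issetBitfield, BYTES_ISSET_ID)); // true

    // unsetBytes() becomes:
    issetBitfield = EncodingUtils.clearBit(issetBitfield, BYTES_ISSET_ID);
    System.out.println(EncodingUtils.testBit(issetBitfield, BYTES_ISSET_ID)); // false
  }
}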
 

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/a4601422/src/blur-util/pom.xml
----------------------------------------------------------------------
diff --git a/src/blur-util/pom.xml b/src/blur-util/pom.xml
index 3417276..1fcc976 100644
--- a/src/blur-util/pom.xml
+++ b/src/blur-util/pom.xml
@@ -1,30 +1,23 @@
 <?xml version="1.0" encoding="UTF-8" ?>
-<!--
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-  http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing,
-software distributed under the License is distributed on an
-"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-KIND, either express or implied.  See the License for the
-specific language governing permissions and limitations
-under the License.
--->
+<!-- Licensed to the Apache Software Foundation (ASF) under one or more contributor 
+	license agreements. See the NOTICE file distributed with this work for additional 
+	information regarding copyright ownership. The ASF licenses this file to 
+	you under the Apache License, Version 2.0 (the "License"); you may not use 
+	this file except in compliance with the License. You may obtain a copy of 
+	the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required 
+	by applicable law or agreed to in writing, software distributed under the 
+	License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS 
+	OF ANY KIND, either express or implied. See the License for the specific 
+	language governing permissions and limitations under the License. -->
 <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
 	xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+	<modelVersion>4.0.0</modelVersion>
 	<parent>
 		<groupId>org.apache.blur</groupId>
 		<artifactId>blur</artifactId>
-		<version>0.1.3</version>
+		<version>0.1.5</version>
+		<relativePath>../pom.xml</relativePath>
 	</parent>
-	<modelVersion>4.0.0</modelVersion>
 	<groupId>org.apache.blur</groupId>
 	<artifactId>blur-util</artifactId>
 	<packaging>jar</packaging>
@@ -34,37 +27,54 @@ under the License.
 		<dependency>
 			<groupId>org.apache.zookeeper</groupId>
 			<artifactId>zookeeper</artifactId>
-			<version>3.3.4</version>
-			<scope>provided</scope>
+			<version>${zookeeper.version}</version>
+			<exclusions>
+				<exclusion>
+					<groupId>javax.mail</groupId>
+					<artifactId>mail</artifactId>
+				</exclusion>
+				<exclusion>
+					<groupId>javax.jms</groupId>
+					<artifactId>jms</artifactId>
+				</exclusion>
+				<exclusion>
+					<groupId>com.sun.jdmk</groupId>
+					<artifactId>jmxtools</artifactId>
+				</exclusion>
+				<exclusion>
+					<groupId>com.sun.jmx</groupId>
+					<artifactId>jmxri</artifactId>
+				</exclusion>
+			</exclusions>
 		</dependency>
 		<dependency>
 			<groupId>junit</groupId>
 			<artifactId>junit</artifactId>
-			<version>4.7</version>
+			<version>${junit.version}</version>
 			<scope>test</scope>
 		</dependency>
 		<dependency>
 			<groupId>org.apache.hadoop</groupId>
 			<artifactId>hadoop-core</artifactId>
-			<version>0.20.2-cdh3u5</version>
+			<version>${hadoop.version}</version>
 			<scope>compile</scope>
 		</dependency>
 		<dependency>
 			<groupId>org.slf4j</groupId>
 			<artifactId>slf4j-api</artifactId>
-			<version>1.6.1</version>
+			<version>${slf4j.version}</version>
 			<scope>compile</scope>
 		</dependency>
 		<dependency>
 			<groupId>org.slf4j</groupId>
 			<artifactId>slf4j-log4j12</artifactId>
-			<version>1.6.1</version>
+			<version>${slf4j.version}</version>
 			<scope>provided</scope>
 		</dependency>
 		<dependency>
 			<groupId>log4j</groupId>
 			<artifactId>log4j</artifactId>
-			<version>1.2.15</version>
+			<version>${log4j.version}</version>
 			<scope>provided</scope>
 			<exclusions>
 				<exclusion>
@@ -85,17 +95,19 @@ under the License.
 				</exclusion>
 			</exclusions>
 		</dependency>
+		<dependency>
+			<groupId>com.yammer.metrics</groupId>
+			<artifactId>metrics-core</artifactId>
+			<version>${metrics-core.version}</version>
+		</dependency>
 	</dependencies>
 
+
 	<repositories>
 		<repository>
 			<id>libdir</id>
 			<url>file://${basedir}/../lib</url>
 		</repository>
-		<repository>
-			<id>cloudera</id>
-			<url>https://repository.cloudera.com/artifactory/cloudera-repos/</url>
-		</repository>
 	</repositories>
 
 	<build>
@@ -110,29 +122,6 @@ under the License.
 						<target>1.6</target>
 					</configuration>
 				</plugin>
-
-				<plugin>
-					<groupId>org.apache.maven.plugins</groupId>
-					<artifactId>maven-dependency-plugin</artifactId>
-					<executions>
-						<execution>
-							<id>copy-dependencies</id>
-							<phase>package</phase>
-							<goals>
-								<goal>copy-dependencies</goal>
-							</goals>
-							<configuration>
-								<outputDirectory>${project.build.directory}/../../../lib
-								</outputDirectory>
-								<overWriteReleases>false</overWriteReleases>
-								<overWriteSnapshots>false</overWriteSnapshots>
-								<overWriteIfNewer>true</overWriteIfNewer>
-								<excludeTransitive>true</excludeTransitive>
-								<excludeArtifactIds>junit,commons-cli,commons-logging,hadoop-core,slf4j-api,slf4j-log4j12</excludeArtifactIds>
-							</configuration>
-						</execution>
-					</executions>
-				</plugin>
 				<plugin>
 					<artifactId>maven-assembly-plugin</artifactId>
 					<configuration>

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/a4601422/src/blur-util/src/main/java/org/apache/blur/CachedMap.java
----------------------------------------------------------------------
diff --git a/src/blur-util/src/main/java/org/apache/blur/CachedMap.java b/src/blur-util/src/main/java/org/apache/blur/CachedMap.java
new file mode 100644
index 0000000..80b1bf8
--- /dev/null
+++ b/src/blur-util/src/main/java/org/apache/blur/CachedMap.java
@@ -0,0 +1,63 @@
+package org.apache.blur;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import java.io.IOException;
+import java.util.Map;
+
+public abstract class CachedMap {
+
+  /**
+   * Clears the in-memory cache of the map; this forces a re-read from the
+   * source.
+   */
+  public abstract void clearCache() throws IOException;
+
+  /**
+   * Fetches the value by key. If the in-memory cache is missing the value, it
+   * is re-read from the source; if the source is also missing it, null is
+   * returned.
+   * 
+   * @param key
+   *          the key.
+   * @return the value.
+   * @throws IOException
+   */
+  public abstract String get(String key) throws IOException;
+
+  /**
+   * Puts the value with the given key into the map if the key was missing.
+   * Returns true if the key with the given value was set, or false if the
+   * key already existed.
+   * 
+   * @param key
+   *          the key.
+   * @param value
+   *          the value.
+   * @return true if successful, false if not.
+   */
+  public abstract boolean putIfMissing(String key, String value) throws IOException;
+
+  /**
+   * Fetches all the keys and values for the map from the source. That means
+   * this is an expensive operation and should be used sparingly.
+   * 
+   * @return the map of all keys to values.
+   * @throws IOException 
+   */
+  public abstract Map<String, String> fetchAllFromSource() throws IOException;
+
+}
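
Editor's note: CachedMap is abstract, so a concrete subclass supplies the backing store. A toy in-memory implementation, shown only to make the contract of the four methods concrete (the class name and backing map are hypothetical, not part of this commit):

import java.io.IOException;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

import org.apache.blur.CachedMap;

// Hypothetical subclass: the "source" is just another in-memory map.
public class InMemoryCachedMap extends CachedMap {

  private final ConcurrentHashMap<String, String> source = new ConcurrentHashMap<String, String>();
  private volatile Map<String, String> cache = new ConcurrentHashMap<String, String>();

  @Override
  public void clearCache() throws IOException {
    cache = new ConcurrentHashMap<String, String>();
  }

  @Override
  public String get(String key) throws IOException {
    String value = cache.get(key);
    if (value == null) {
      value = source.get(key);        // re-read from the source
      if (value != null) {
        cache.put(key, value);
      }
    }
    return value;                      // null if the source is also missing it
  }

  @Override
  public boolean putIfMissing(String key, String value) throws IOException {
    boolean added = source.putIfAbsent(key, value) == null;
    if (added) {
      cache.put(key, value);
    }
    return added;
  }

  @Override
  public Map<String, String> fetchAllFromSource() throws IOException {
    return new ConcurrentHashMap<String, String>(source);
  }
}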

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/a4601422/src/blur-util/src/main/java/org/apache/blur/concurrent/Executors.java
----------------------------------------------------------------------
diff --git a/src/blur-util/src/main/java/org/apache/blur/concurrent/Executors.java b/src/blur-util/src/main/java/org/apache/blur/concurrent/Executors.java
index 47bba75..f6b8fb2 100644
--- a/src/blur-util/src/main/java/org/apache/blur/concurrent/Executors.java
+++ b/src/blur-util/src/main/java/org/apache/blur/concurrent/Executors.java
@@ -26,9 +26,16 @@ import java.util.concurrent.atomic.AtomicInteger;
 public class Executors {
 
   public static ExecutorService newThreadPool(String prefix, int threadCount) {
+    return newThreadPool(prefix, threadCount, true);
+  }
+
+  public static ExecutorService newThreadPool(String prefix, int threadCount, boolean watch) {
     ThreadPoolExecutor executorService = new ThreadPoolExecutor(threadCount, threadCount, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(), new BlurThreadFactory(prefix));
     executorService.setRejectedExecutionHandler(new ThreadPoolExecutor.CallerRunsPolicy());
-    return ThreadWatcher.instance().watch(executorService);
+    if (watch) {
+      return ThreadWatcher.instance().watch(executorService);
+    }
+    return executorService;
   }
 
   public static ExecutorService newSingleThreadExecutor(String prefix) {
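A hedged usage sketch (not part of this commit): the new three-argument overload lets a caller opt out of ThreadWatcher registration, while the two-argument form keeps the existing watched behavior. The pool names below are illustrative.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;

import org.apache.blur.concurrent.Executors;

public class ExecutorsUsage {
  public static void main(String[] args) throws InterruptedException {
    // Watched pool (existing behavior), equivalent to newThreadPool(prefix, count, true).
    ExecutorService watched = Executors.newThreadPool("example-watched", 4);

    // Unwatched pool (new in this change): skips ThreadWatcher.instance().watch(...).
    ExecutorService unwatched = Executors.newThreadPool("example-unwatched", 4, false);

    watched.shutdown();
    unwatched.shutdown();
    watched.awaitTermination(10, TimeUnit.SECONDS);
    unwatched.awaitTermination(10, TimeUnit.SECONDS);
  }
}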

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/a4601422/src/blur-util/src/main/java/org/apache/blur/metrics/AtomicLongGauge.java
----------------------------------------------------------------------
diff --git a/src/blur-util/src/main/java/org/apache/blur/metrics/AtomicLongGauge.java b/src/blur-util/src/main/java/org/apache/blur/metrics/AtomicLongGauge.java
new file mode 100644
index 0000000..02e6fce
--- /dev/null
+++ b/src/blur-util/src/main/java/org/apache/blur/metrics/AtomicLongGauge.java
@@ -0,0 +1,24 @@
+package org.apache.blur.metrics;
+
+import java.util.concurrent.atomic.AtomicLong;
+
+import com.yammer.metrics.core.Gauge;
+
+public class AtomicLongGauge extends Gauge<Long> {
+
+  private final AtomicLong at;
+
+  public AtomicLongGauge(AtomicLong at) {
+    this.at = at;
+  }
+
+  @Override
+  public Long value() {
+    return at.get();
+  }
+
+  public static Gauge<Long> wrap(AtomicLong at) {
+    return new AtomicLongGauge(at);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/a4601422/src/blur-util/src/main/java/org/apache/blur/metrics/MetricsConstants.java
----------------------------------------------------------------------
diff --git a/src/blur-util/src/main/java/org/apache/blur/metrics/MetricsConstants.java b/src/blur-util/src/main/java/org/apache/blur/metrics/MetricsConstants.java
new file mode 100644
index 0000000..f5e0a09
--- /dev/null
+++ b/src/blur-util/src/main/java/org/apache/blur/metrics/MetricsConstants.java
@@ -0,0 +1,25 @@
+package org.apache.blur.metrics;
+
+public class MetricsConstants {
+  public static final String LUCENE = "Lucene";
+  public static final String BLUR = "Blur";
+  public static final String ORG_APACHE_BLUR = "org.apache.blur";
+  public static final String INTERNAL_BUFFERS = "Internal Buffers";
+  public static final String OTHER_SIZES_ALLOCATED = "Other Sizes Allocated";
+  public static final String _8K_SIZE_ALLOCATED = "8K Size Allocated";
+  public static final String _1K_SIZE_ALLOCATED = "1K Size Allocated";
+  public static final String LOST = "Lost";
+  public static final String THRIFT_CALLS = "Thrift Calls in \u00B5s";
+  public static final String REQUESTS = "Requests";
+  public static final String DELETE = "delete";
+  public static final String UPDATE = "update";
+  public static final String ADD = "add";
+  public static final String SEARCH = "search";
+  public static final String REMOTE = "remote";
+  public static final String HDFS = "HDFS";
+  public static final String LOCAL = "local";
+  public static final String HIT = "Hit";
+  public static final String MISS = "Miss";
+  public static final String CACHE = "Cache";
+  public static final String EVICTION = "Eviction";
+}
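A hedged sketch (not part of this commit) of how AtomicLongGauge and these constants might be used together to publish a counter through the Yammer metrics registry. It assumes the metrics-core 2.x API (Metrics.newGauge and MetricName), the library these classes import from.

import java.util.concurrent.atomic.AtomicLong;

import org.apache.blur.metrics.AtomicLongGauge;
import org.apache.blur.metrics.MetricsConstants;

import com.yammer.metrics.Metrics;
import com.yammer.metrics.core.MetricName;

public class CacheHitGaugeExample {

  // Counter updated by application code; the gauge reads it lazily.
  private static final AtomicLong hitCount = new AtomicLong();

  public static void main(String[] args) {
    MetricName name = new MetricName(MetricsConstants.ORG_APACHE_BLUR, MetricsConstants.CACHE, MetricsConstants.HIT);
    Metrics.newGauge(name, AtomicLongGauge.wrap(hitCount));
    hitCount.incrementAndGet(); // the registered gauge now reports 1
  }
}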

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/a4601422/src/blur-util/src/main/java/org/apache/blur/zookeeper/WatchChildren.java
----------------------------------------------------------------------
diff --git a/src/blur-util/src/main/java/org/apache/blur/zookeeper/WatchChildren.java b/src/blur-util/src/main/java/org/apache/blur/zookeeper/WatchChildren.java
index d880493..8c7c3da 100644
--- a/src/blur-util/src/main/java/org/apache/blur/zookeeper/WatchChildren.java
+++ b/src/blur-util/src/main/java/org/apache/blur/zookeeper/WatchChildren.java
@@ -32,7 +32,6 @@ import org.apache.zookeeper.WatchedEvent;
 import org.apache.zookeeper.Watcher;
 import org.apache.zookeeper.ZooKeeper;
 
-
 public class WatchChildren implements Closeable {
 
   private final static Log LOG = LogFactory.getLog(WatchChildren.class);
@@ -69,7 +68,7 @@ public class WatchChildren implements Closeable {
     _watchThread = new Thread(new Runnable() {
       @Override
       public void run() {
-        startDoubleCheckThread();
+//        startDoubleCheckThread();
         while (_running.get()) {
           synchronized (_lock) {
             try {
@@ -81,7 +80,11 @@ public class WatchChildren implements Closeable {
                   }
                 }
               });
-              onChange.action(_children);
+              try {
+                onChange.action(_children);
+              } catch (Throwable t) {
+                LOG.error("Unknown error during onchange action [" + this + "].", t);
+              }
               _lock.wait();
             } catch (KeeperException e) {
               LOG.error("Error in instance [{0}]", e, instance);

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/a4601422/src/blur-util/src/main/java/org/apache/blur/zookeeper/ZkCachedMap.java
----------------------------------------------------------------------
diff --git a/src/blur-util/src/main/java/org/apache/blur/zookeeper/ZkCachedMap.java b/src/blur-util/src/main/java/org/apache/blur/zookeeper/ZkCachedMap.java
new file mode 100644
index 0000000..22eb9e6
--- /dev/null
+++ b/src/blur-util/src/main/java/org/apache/blur/zookeeper/ZkCachedMap.java
@@ -0,0 +1,225 @@
+package org.apache.blur.zookeeper;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+import org.apache.blur.CachedMap;
+import org.apache.zookeeper.CreateMode;
+import org.apache.zookeeper.KeeperException;
+import org.apache.zookeeper.ZooDefs.Ids;
+import org.apache.zookeeper.ZooKeeper;
+import org.apache.zookeeper.data.Stat;
+
+/**
+ * This is a simple implementation of a set-once map of string-to-string that
+ * is backed by ZooKeeper, meaning that once a value is set it cannot be
+ * changed to a different value. The clearCache method is called when the
+ * internal cache should be cleared and re-read from ZooKeeper. <br>
+ * <br>
+ * Usage:<br>
+ * <br>
+ * ZkCachedMap map = new ZkCachedMap(zooKeeper, path);<br>
+ * String key = "key";<br>
+ * String newValue = "value";<br>
+ * String value = map.get(key);<br>
+ * if (value == null) {<br>
+ * &nbsp;&nbsp;if (map.putIfMissing(key, newValue)) {<br>
+ * &nbsp;&nbsp;&nbsp;&nbsp;System.out.println("Yay! My value was taken.");<br>
+ * &nbsp;&nbsp;&nbsp;&nbsp;value = newValue;<br>
+ * &nbsp;&nbsp;} else {<br>
+ * &nbsp;&nbsp;&nbsp;&nbsp;System.out.println("Boo! Someone beat me to it.");<br>
+ * &nbsp;&nbsp;&nbsp;&nbsp;value = map.get(key);<br>
+ * &nbsp;&nbsp;}<br>
+ * }<br>
+ * System.out.println("key [" + key + "] value [" + value + "]");<br>
+ * 
+ */
+public class ZkCachedMap extends CachedMap {
+
+  private static final String SEP = "-";
+
+  private final Map<String, String> cache = new ConcurrentHashMap<String, String>();
+  private final ZooKeeper zooKeeper;
+  private final String basePath;
+
+  public ZkCachedMap(ZooKeeper zooKeeper, String basePath) {
+    this.zooKeeper = zooKeeper;
+    this.basePath = basePath;
+  }
+
+  @Override
+  public void clearCache() {
+    cache.clear();
+  }
+
+  /**
+   * Checks the in-memory map first, then fetches from ZooKeeper on a cache miss.
+   * 
+   * @param key
+   *          the key.
+   * @return the value, null if it does not exist.
+   * @exception IOException
+   *              if there is an io error.
+   */
+  @Override
+  public String get(String key) throws IOException {
+    String value = cache.get(key);
+    if (value != null) {
+      return value;
+    }
+    return getFromZooKeeper(key);
+  }
+
+  /**
+   * Checks the in-memory map first; if the key already has a value, returns
+   * false. If missing, attempts the set-once put against ZooKeeper.
+   * 
+   * @param key
+   *          the key.
+   * @param value
+   *          the value.
+   * @return boolean, true if the put was successful, false if a value already
+   *         exists.
+   * @exception IOException
+   *              if there is an io error.
+   */
+  @Override
+  public boolean putIfMissing(String key, String value) throws IOException {
+    String existingValue = cache.get(key);
+    if (existingValue != null) {
+      return false;
+    }
+    return putIfMissingFromZooKeeper(key, value);
+  }
+
+  private String getFromZooKeeper(String key) throws IOException {
+    try {
+      List<String> keys = new ArrayList<String>(zooKeeper.getChildren(basePath, false));
+      Collections.sort(keys);
+      for (String k : keys) {
+        String realKey = getRealKey(k);
+        if (realKey.equals(key)) {
+          String path = getPath(k);
+          byte[] data = getValue(path);
+          if (data == null) {
+            return null;
+          }
+          String value = new String(data);
+          cache.put(key, value);
+          return value;
+        }
+      }
+      return null;
+    } catch (KeeperException e) {
+      throw new IOException(e);
+    } catch (InterruptedException e) {
+      throw new IOException(e);
+    }
+  }
+
+  private byte[] getValue(String path) throws KeeperException, InterruptedException {
+    Stat stat = zooKeeper.exists(path, false);
+    if (stat == null) {
+      return null;
+    }
+    byte[] data = zooKeeper.getData(path, false, stat);
+    if (data == null) {
+      return null;
+    }
+    return data;
+  }
+
+  private boolean putIfMissingFromZooKeeper(String key, String value) throws IOException {
+    try {
+      String path = getPath(key);
+      String newPath = zooKeeper.create(path + SEP, value.getBytes(), Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT_SEQUENTIAL);
+      String keyWithSeq = getKeyWithSeq(newPath);
+      List<String> keys = new ArrayList<String>(zooKeeper.getChildren(basePath, false));
+      Collections.sort(keys);
+      for (String k : keys) {
+        String realKey = getRealKey(k);
+        if (realKey.equals(key)) {
+          if (keyWithSeq.equals(k)) {
+            // got the lock
+            cache.put(key, value);
+            return true;
+          } else {
+            // remove duplicate key
+            zooKeeper.delete(newPath, -1);
+            return false;
+          }
+        }
+      }
+      return false;
+    } catch (KeeperException e) {
+      throw new IOException(e);
+    } catch (InterruptedException e) {
+      throw new IOException(e);
+    }
+  }
+
+  private String getKeyWithSeq(String newPath) {
+    int lastIndexOf = newPath.lastIndexOf('/');
+    if (lastIndexOf < 0) {
+      throw new RuntimeException("Path [" + newPath + "] does not contain [/]");
+    }
+    return newPath.substring(lastIndexOf + 1);
+  }
+
+  private String getRealKey(String keyWithSeq) {
+    int lastIndexOf = keyWithSeq.lastIndexOf(SEP);
+    if (lastIndexOf < 0) {
+      throw new RuntimeException("Key [" + keyWithSeq + "] does not contain [" + SEP + "]");
+    }
+    return keyWithSeq.substring(0, lastIndexOf);
+  }
+
+  private String getPath(String key) {
+    return basePath + "/" + key;
+  }
+
+  @Override
+  public Map<String, String> fetchAllFromSource() throws IOException {
+    try {
+      Map<String, String> result = new HashMap<String, String>();
+      List<String> keys = new ArrayList<String>(zooKeeper.getChildren(basePath, false));
+      Collections.sort(keys);
+      for (String k : keys) {
+        String realKey = getRealKey(k);
+        String path = getPath(k);
+        byte[] value = getValue(path);
+        if (value != null) {
+          result.put(realKey, new String(value));
+        }
+      }
+      return result;
+    } catch (KeeperException e) {
+      throw new IOException(e);
+    } catch (InterruptedException e) {
+      throw new IOException(e);
+    }
+
+  }
+
+}
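The usage block in the ZkCachedMap javadoc above, rewritten as a compilable class. The connect string, session timeout, and base znode are illustrative assumptions; the base path is expected to already exist in ZooKeeper.

import java.io.IOException;

import org.apache.blur.zookeeper.ZkCachedMap;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class ZkCachedMapUsage {
  public static void main(String[] args) throws IOException, InterruptedException {
    // Illustrative connection; real code would manage the session more carefully.
    ZooKeeper zooKeeper = new ZooKeeper("localhost:2181", 30000, new Watcher() {
      @Override
      public void process(WatchedEvent event) {
        // no-op default watcher for this example
      }
    });

    ZkCachedMap map = new ZkCachedMap(zooKeeper, "/blur/example-map");

    String key = "key";
    String newValue = "value";
    String value = map.get(key);
    if (value == null) {
      if (map.putIfMissing(key, newValue)) {
        System.out.println("Yay! My value was taken.");
        value = newValue;
      } else {
        System.out.println("Boo! Someone beat me to it.");
        value = map.get(key);
      }
    }
    System.out.println("key [" + key + "] value [" + value + "]");

    zooKeeper.close();
  }
}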

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/a4601422/src/distribution/pom.xml
----------------------------------------------------------------------
diff --git a/src/distribution/pom.xml b/src/distribution/pom.xml
new file mode 100644
index 0000000..e52fa2a
--- /dev/null
+++ b/src/distribution/pom.xml
@@ -0,0 +1,53 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.apache.blur</groupId>
+    <artifactId>blur</artifactId>
+    <version>0.1.5</version>
+    <relativePath>../pom.xml</relativePath>
+  </parent>
+  
+  <artifactId>apache-blur</artifactId>
+
+  <packaging>pom</packaging>
+
+  <name>Distribution</name>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.blur</groupId>
+      <artifactId>blur-mapred</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.blur</groupId>
+      <artifactId>blur-shell</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+  </dependencies>
+
+  <build>
+    <plugins>
+      <plugin>
+        <artifactId>maven-assembly-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>distro-assembly</id>
+            <phase>package</phase>
+            <goals>
+              <goal>single</goal>
+            </goals>
+            <configuration>
+              <descriptors>
+                <descriptor>src/assemble/bin.xml</descriptor>
+              </descriptors>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
+</project>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/a4601422/src/distribution/src/assemble/bin.xml
----------------------------------------------------------------------
diff --git a/src/distribution/src/assemble/bin.xml b/src/distribution/src/assemble/bin.xml
new file mode 100644
index 0000000..4b266fe
--- /dev/null
+++ b/src/distribution/src/assemble/bin.xml
@@ -0,0 +1,36 @@
+<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.2"
+    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+    xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.2 http://maven.apache.org/xsd/assembly-1.1.2.xsd">
+  <id>bin</id>
+  <formats>
+    <format>tar.gz</format>
+  </formats>
+  <includeBaseDirectory>false</includeBaseDirectory>
+
+  <dependencySets>
+    <dependencySet>
+          <!--
+             The project artifact is not copied under the library directory
+             since it is added to the root directory of the tar.gz package.
+         -->
+      <useProjectArtifact>false</useProjectArtifact>
+      <outputDirectory>apache-blur-${project.version}/lib</outputDirectory>
+      <unpack>false</unpack>
+    </dependencySet>
+  </dependencySets>
+
+  <fileSets>
+      <!--
+         Adds the startup scripts to the root directory of the tar.gz package.
+         The startup scripts are located in the src/main/scripts directory, as
+         per Maven conventions.
+     -->
+    <fileSet>
+      <directory>${project.build.scriptSourceDirectory}</directory>
+      <outputDirectory>apache-blur-${project.version}</outputDirectory>
+      <excludes>
+        <exclude>**/.empty</exclude>
+      </excludes>
+    </fileSet>
+  </fileSets>
+</assembly>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/a4601422/src/distribution/src/main/scripts/bin/blur
----------------------------------------------------------------------
diff --git a/src/distribution/src/main/scripts/bin/blur b/src/distribution/src/main/scripts/bin/blur
new file mode 100755
index 0000000..afe5ebb
--- /dev/null
+++ b/src/distribution/src/main/scripts/bin/blur
@@ -0,0 +1,26 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+. "$bin"/blur-config.sh
+PROC_NAME=$1
+"$JAVA_HOME"/bin/java -Dblur.name=$PROC_NAME -Djava.library.path=$JAVA_LIBRARY_PATH $BLUR_COMMAND -Dblur.logs.dir=$BLUR_LOGS -Dblur.log.file=$PROC_NAME.log -cp $BLUR_CLASSPATH $@
+
+
+

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/a4601422/src/distribution/src/main/scripts/bin/blur-config.sh
----------------------------------------------------------------------
diff --git a/src/distribution/src/main/scripts/bin/blur-config.sh b/src/distribution/src/main/scripts/bin/blur-config.sh
new file mode 100755
index 0000000..bd6b016
--- /dev/null
+++ b/src/distribution/src/main/scripts/bin/blur-config.sh
@@ -0,0 +1,104 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+export BLUR_HOME="$bin"/..
+export BLUR_HOME_CONF=$BLUR_HOME/conf
+
+. $BLUR_HOME/conf/blur-env.sh
+if [ -z "$JAVA_HOME" ]; then
+  cat 1>&2 <<EOF
++======================================================================+
+|      Error: JAVA_HOME is not set and Java could not be found         |
++----------------------------------------------------------------------+
+| Please download the latest Sun JDK from the Sun Java web site        |
+|       > http://java.sun.com/javase/downloads/ <                      |
+|                                                                      |
+| Hadoop and Blur require Java 1.6 or later.                           |
+| NOTE: This script will find Sun Java whether you install using the   |
+|       binary or the RPM based installer.                             |
++======================================================================+
+EOF
+  exit 1
+fi
+
+if [ -z "$HADOOP_HOME" ]; then
+  cat 1>&2 <<EOF
++======================================================================+
+|      Error: HADOOP_HOME is not set                                   |
++----------------------------------------------------------------------+
+| Please download the stable Hadoop version from Apache web site       |
+|       > http://hadoop.apache.org/ <                                  |
+|                                                                      |
+| Blur requires Hadoop 0.20.205 or later.                              |
++======================================================================+
+EOF
+  exit 1
+fi
+
+export JAVA=$JAVA_HOME/bin/java
+
+export BLUR_LOGS=${BLUR_LOGS:=$BLUR_HOME/logs}
+
+if [ ! -d "$BLUR_LOGS" ]; then
+  mkdir -p $BLUR_LOGS
+fi
+
+if [ ! -d "$BLUR_HOME/pids" ]; then
+  mkdir -p $BLUR_HOME/pids
+fi
+
+BLUR_CLASSPATH=$BLUR_HOME/conf
+
+for f in $HADOOP_HOME/*.jar; do
+  BLUR_CLASSPATH=${BLUR_CLASSPATH}:$f;
+done
+
+for f in $HADOOP_HOME/lib/*.jar; do
+  BLUR_CLASSPATH=${BLUR_CLASSPATH}:$f;
+done
+
+for f in $BLUR_HOME/lib/*.jar; do
+  BLUR_CLASSPATH=${BLUR_CLASSPATH}:$f;
+done
+
+for f in $BLUR_HOME/lib/*.war; do
+  BLUR_CLASSPATH=${BLUR_CLASSPATH}:$f;
+done
+
+for f in $HADOOP_HOME/lib/jsp-2.1/*.jar; do
+ BLUR_CLASSPATH=${BLUR_CLASSPATH}:$f;
+done
+
+export BLUR_CLASSPATH
+
+# setup 'java.library.path' for native-hadoop code if necessary
+if [ -d "${HADOOP_HOME}/build/native" -o -d "${HADOOP_HOME}/lib/native" -o -d "${HADOOP_HOME}/sbin" ]; then
+  JAVA_PLATFORM=`CLASSPATH=${BLUR_CLASSPATH} ${JAVA} -Xmx32m ${HADOOP_JAVA_PLATFORM_OPTS} org.apache.hadoop.util.PlatformName | sed -e "s/ /_/g"`
+
+  if [ -d "${HADOOP_HOME}/lib/native" ]; then
+    if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
+      JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:${HADOOP_HOME}/lib/native/${JAVA_PLATFORM}
+    else
+      JAVA_LIBRARY_PATH=${HADOOP_HOME}/lib/native/${JAVA_PLATFORM}
+    fi
+  fi
+fi
+
+HOSTNAME=`hostname`

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/a4601422/src/distribution/src/main/scripts/bin/controllers.sh
----------------------------------------------------------------------
diff --git a/src/distribution/src/main/scripts/bin/controllers.sh b/src/distribution/src/main/scripts/bin/controllers.sh
new file mode 100755
index 0000000..37c7108
--- /dev/null
+++ b/src/distribution/src/main/scripts/bin/controllers.sh
@@ -0,0 +1,34 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+. "$bin"/blur-config.sh
+
+export HOSTLIST="${BLUR_HOME_CONF}/controllers"
+
+for controller in `cat "$HOSTLIST"|sed  "s/#.*$//;/^$/d"`; do
+ ssh $BLUR_SSH_OPTS $controller $"${@// /\\ }" \
+   2>&1 | sed "s/^/$controller: /" &
+ if [ "$BLUR_CONTROLLER_SLEEP" != "" ]; then
+   sleep $BLUR_CONTROLLER_SLEEP
+ fi
+done
+
+wait
+

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/a4601422/src/distribution/src/main/scripts/bin/shards.sh
----------------------------------------------------------------------
diff --git a/src/distribution/src/main/scripts/bin/shards.sh b/src/distribution/src/main/scripts/bin/shards.sh
new file mode 100755
index 0000000..6b22678
--- /dev/null
+++ b/src/distribution/src/main/scripts/bin/shards.sh
@@ -0,0 +1,34 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+. "$bin"/blur-config.sh
+
+export HOSTLIST="${BLUR_HOME_CONF}/shards"
+
+for shard in `cat "$HOSTLIST"|sed  "s/#.*$//;/^$/d"`; do
+ ssh $BLUR_SSH_OPTS $shard $"${@// /\\ }" \
+   2>&1 | sed "s/^/$shard: /" &
+ if [ "$BLUR_SHARD_SLEEP" != "" ]; then
+   sleep $BLUR_SHARD_SLEEP
+ fi
+done
+
+wait
+

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/a4601422/src/distribution/src/main/scripts/bin/start-all.sh
----------------------------------------------------------------------
diff --git a/src/distribution/src/main/scripts/bin/start-all.sh b/src/distribution/src/main/scripts/bin/start-all.sh
new file mode 100755
index 0000000..7da121a
--- /dev/null
+++ b/src/distribution/src/main/scripts/bin/start-all.sh
@@ -0,0 +1,24 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+. "$bin"/blur-config.sh
+
+$BLUR_HOME/bin/start-shards.sh
+$BLUR_HOME/bin/start-controllers.sh
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/a4601422/src/distribution/src/main/scripts/bin/start-controller-server.sh
----------------------------------------------------------------------
diff --git a/src/distribution/src/main/scripts/bin/start-controller-server.sh b/src/distribution/src/main/scripts/bin/start-controller-server.sh
new file mode 100755
index 0000000..750152b
--- /dev/null
+++ b/src/distribution/src/main/scripts/bin/start-controller-server.sh
@@ -0,0 +1,41 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+. "$bin"/blur-config.sh
+
+INSTANCE=0
+while [  $INSTANCE -lt $BLUR_NUMBER_OF_CONTROLLER_SERVER_INSTANCES_PER_MACHINE ]; do
+  PID_FILE=$BLUR_HOME/pids/controller-$INSTANCE.pid
+
+  if [ -f $PID_FILE ]; then
+    if kill -0 `cat $PID_FILE` > /dev/null 2>&1; then
+      echo Controller server already running as process `cat $PID_FILE`.  Stop it first.
+      let INSTANCE=INSTANCE+1
+      continue
+    fi
+  fi
+
+  PROC_NAME=blur-controller-server-$HOSTNAME-$INSTANCE
+  nohup "$JAVA_HOME"/bin/java -Dblur.name=$PROC_NAME -Djava.library.path=$JAVA_LIBRARY_PATH -Dblur-controller-$INSTANCE $BLUR_CONTROLLER_JVM_OPTIONS -Dblur.logs.dir=$BLUR_LOGS -Dblur.log.file=$PROC_NAME.log -cp $BLUR_CLASSPATH org.apache.blur.thrift.ThriftBlurControllerServer -s $INSTANCE > "$BLUR_LOGS/$PROC_NAME.out" 2>&1 < /dev/null &
+  echo $! > $PID_FILE
+  echo Controller [$INSTANCE] starting as process `cat $PID_FILE`.
+
+  let INSTANCE=INSTANCE+1 
+done
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/a4601422/src/distribution/src/main/scripts/bin/start-controllers.sh
----------------------------------------------------------------------
diff --git a/src/distribution/src/main/scripts/bin/start-controllers.sh b/src/distribution/src/main/scripts/bin/start-controllers.sh
new file mode 100755
index 0000000..c9b77f1
--- /dev/null
+++ b/src/distribution/src/main/scripts/bin/start-controllers.sh
@@ -0,0 +1,24 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+. "$bin"/blur-config.sh
+
+$BLUR_HOME/bin/controllers.sh $BLUR_HOME/bin/start-controller-server.sh
+

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/a4601422/src/distribution/src/main/scripts/bin/start-shard-server.sh
----------------------------------------------------------------------
diff --git a/src/distribution/src/main/scripts/bin/start-shard-server.sh b/src/distribution/src/main/scripts/bin/start-shard-server.sh
new file mode 100755
index 0000000..99a4611
--- /dev/null
+++ b/src/distribution/src/main/scripts/bin/start-shard-server.sh
@@ -0,0 +1,43 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+. "$bin"/blur-config.sh
+
+INSTANCE=0
+while [  $INSTANCE -lt $BLUR_NUMBER_OF_SHARD_SERVER_INSTANCES_PER_MACHINE ]; do
+  PID_FILE=$BLUR_HOME/pids/shard-$INSTANCE.pid
+
+  if [ -f $PID_FILE ]; then
+    if kill -0 `cat $PID_FILE` > /dev/null 2>&1; then
+      echo Shard server already running as process `cat $PID_FILE`.  Stop it first.
+      let INSTANCE=INSTANCE+1
+      continue
+    fi
+  fi
+
+  PROC_NAME=blur-shard-server-$HOSTNAME-$INSTANCE
+  nohup "$JAVA_HOME"/bin/java -Dblur.name=$PROC_NAME -Djava.library.path=$JAVA_LIBRARY_PATH -Dblur-shard-$INSTANCE $BLUR_SHARD_JVM_OPTIONS -Dblur.logs.dir=$BLUR_LOGS -Dblur.log.file=$PROC_NAME.log -cp $BLUR_CLASSPATH org.apache.blur.thrift.ThriftBlurShardServer -s $INSTANCE > "$BLUR_LOGS/$PROC_NAME.out" 2>&1 < /dev/null &
+  echo $! > $PID_FILE
+  echo Shard [$INSTANCE] starting as process `cat $PID_FILE`.
+
+  let INSTANCE=INSTANCE+1 
+done
+
+

