incubator-blur-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From amccu...@apache.org
Subject [40/51] [abbrv] Massive changes to the thrift API. Getting closer to the final version, this commit also removes the controller from the architecture.
Date Tue, 11 Dec 2012 02:21:00 GMT
http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/aee531c2/src/blur-core/src/main/java/org/apache/blur/analysis/BlurAnalyzer.java
----------------------------------------------------------------------
diff --git a/src/blur-core/src/main/java/org/apache/blur/analysis/BlurAnalyzer.java b/src/blur-core/src/main/java/org/apache/blur/analysis/BlurAnalyzer.java
index 5f8f256..08faf6f 100644
--- a/src/blur-core/src/main/java/org/apache/blur/analysis/BlurAnalyzer.java
+++ b/src/blur-core/src/main/java/org/apache/blur/analysis/BlurAnalyzer.java
@@ -17,15 +17,8 @@ package org.apache.blur.analysis;
  * limitations under the License.
  */
 import static org.apache.blur.lucene.LuceneVersionConstant.LUCENE_VERSION;
-import static org.apache.blur.utils.BlurConstants.PRIME_DOC;
-import static org.apache.blur.utils.BlurConstants.RECORD_ID;
-import static org.apache.blur.utils.BlurConstants.ROW_ID;
-import static org.apache.blur.utils.BlurConstants.SUPER;
 
-import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
-import java.io.File;
-import java.io.FileInputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.lang.reflect.Constructor;
@@ -35,27 +28,14 @@ import java.util.Map;
 import java.util.Set;
 import java.util.TreeSet;
 
-import org.apache.blur.thrift.generated.AlternateColumnDefinition;
-import org.apache.blur.thrift.generated.AnalyzerDefinition;
-import org.apache.blur.thrift.generated.ColumnDefinition;
-import org.apache.blur.thrift.generated.ColumnFamilyDefinition;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.AnalyzerWrapper;
 import org.apache.lucene.analysis.core.KeywordAnalyzer;
 import org.apache.lucene.analysis.standard.StandardAnalyzer;
-import org.apache.lucene.document.Field.Index;
 import org.apache.lucene.document.Field.Store;
 import org.apache.lucene.search.NumericRangeQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.util.Version;
-import org.apache.thrift.TException;
-import org.apache.thrift.protocol.TJSONProtocol;
-import org.apache.thrift.transport.TMemoryBuffer;
-import org.apache.thrift.transport.TMemoryInputTransport;
 
 public final class BlurAnalyzer extends AnalyzerWrapper {
 
@@ -81,7 +61,6 @@ public final class BlurAnalyzer extends AnalyzerWrapper {
   private Map<String, Set<String>> _subIndexNameLookups = new HashMap<String, Set<String>>();
   private Map<String, Boolean> _fullTextFields = new HashMap<String, Boolean>();
   private Map<String, Boolean> _fullTextColumnFamilies = new HashMap<String, Boolean>();
-  private AnalyzerDefinition _analyzerDefinition;
   private Analyzer _fullTextAnalyzer = new StandardAnalyzer(LUCENE_VERSION);
   private Analyzer _defaultAnalyzer;
   private Map<String, Analyzer> _analyzers = new HashMap<String, Analyzer>();
@@ -92,35 +71,21 @@ public final class BlurAnalyzer extends AnalyzerWrapper {
   }
 
   public BlurAnalyzer(Analyzer analyzer) {
-    _analyzerDefinition = new AnalyzerDefinition();
     _defaultAnalyzer = analyzer;
   }
 
-  public BlurAnalyzer(AnalyzerDefinition analyzerDefinition) {
-    _analyzerDefinition = analyzerDefinition;
-    ColumnDefinition defaultDefinition = analyzerDefinition.getDefaultDefinition();
-    String fullTextAnalyzerClassName = analyzerDefinition.fullTextAnalyzerClassName;
-    if (fullTextAnalyzerClassName != null) {
-      _fullTextAnalyzer = getAnalyzerByClassName(fullTextAnalyzerClassName, aliases, null, null);
-    }
-    if (defaultDefinition == null) {
-      defaultDefinition = new ColumnDefinition(STANDARD, true, null);
-      analyzerDefinition.setDefaultDefinition(defaultDefinition);
-    }
-    _defaultAnalyzer = getAnalyzerByClassName(defaultDefinition.getAnalyzerClassName(), aliases, null, null);
-    KeywordAnalyzer keywordAnalyzer = new KeywordAnalyzer();
-    _analyzers = new HashMap<String, Analyzer>();
-    _analyzers.put(ROW_ID, keywordAnalyzer);
-    _analyzers.put(RECORD_ID, keywordAnalyzer);
-    _analyzers.put(PRIME_DOC, keywordAnalyzer);
-    _analyzers.put(SUPER, _fullTextAnalyzer);
-    load(_analyzers, _analyzerDefinition.columnFamilyDefinitions, _fullTextFields, _subIndexNameLookups, _storeMap, _fullTextColumnFamilies, _typeLookup);
-  }
-
   public BlurAnalyzer() {
     this(new StandardAnalyzer(LUCENE_VERSION));
   }
 
+  public BlurAnalyzer(org.apache.blur.thrift.generated.Analyzer analyzer) {
+    this(convert(analyzer));
+  }
+
+  private static Analyzer convert(org.apache.blur.thrift.generated.Analyzer analyzer) {
+    return null;
+  }
+
   private Analyzer getAnalyzer(String name) {
     Analyzer analyzer = _analyzers.get(name);
     return analyzer;
@@ -166,26 +131,6 @@ public final class BlurAnalyzer extends AnalyzerWrapper {
     return null;
   }
 
-  public boolean isFullTextField(String fieldName) {
-    Boolean b = _fullTextFields.get(fieldName);
-    if (b != null) {
-      return b;
-    }
-    String cf = getColumnFamily(fieldName);
-    if (cf == null) {
-      return false;
-    }
-    b = _fullTextColumnFamilies.get(cf);
-    if (b != null) {
-      return b;
-    }
-    ColumnDefinition defaultDefinition = _analyzerDefinition.getDefaultDefinition();
-    if (defaultDefinition != null && defaultDefinition.fullTextIndex) {
-      return true;
-    }
-    return false;
-  }
-
   public Store getStore(String indexName) {
     Store store = _storeMap.get(indexName);
     if (store == null) {
@@ -194,23 +139,6 @@ public final class BlurAnalyzer extends AnalyzerWrapper {
     return store;
   }
 
-  public Index getIndex(String indexName) {
-    return Index.ANALYZED_NO_NORMS;
-  }
-
-  public String toJSON() {
-    TMemoryBuffer trans = new TMemoryBuffer(1024);
-    TJSONProtocol protocol = new TJSONProtocol(trans);
-    try {
-      _analyzerDefinition.write(protocol);
-    } catch (TException e) {
-      throw new RuntimeException(e);
-    }
-    trans.close();
-    byte[] array = trans.getArray();
-    return new String(array, 0, trans.length());
-  }
-
   private String getColumnFamily(String fieldName) {
     int index = fieldName.indexOf('.');
     if (index < 0) {
@@ -219,10 +147,6 @@ public final class BlurAnalyzer extends AnalyzerWrapper {
     return fieldName.substring(0, index);
   }
 
-  public AnalyzerDefinition getAnalyzerDefinition() {
-    return _analyzerDefinition;
-  }
-
   public void close() {
 
   }
@@ -238,47 +162,6 @@ public final class BlurAnalyzer extends AnalyzerWrapper {
     return components;
   }
 
-  public static BlurAnalyzer create(File file) throws IOException {
-    FileInputStream inputStream = new FileInputStream(file);
-    try {
-      return create(inputStream);
-    } finally {
-      inputStream.close();
-    }
-  }
-
-  public static BlurAnalyzer create(InputStream inputStream) throws IOException {
-    TMemoryInputTransport trans = new TMemoryInputTransport(getBytes(inputStream));
-    TJSONProtocol protocol = new TJSONProtocol(trans);
-    AnalyzerDefinition analyzerDefinition = new AnalyzerDefinition();
-    try {
-      analyzerDefinition.read(protocol);
-    } catch (TException e) {
-      throw new RuntimeException(e);
-    }
-    trans.close();
-    return new BlurAnalyzer(analyzerDefinition);
-  }
-
-  public static BlurAnalyzer create(String jsonStr) throws IOException {
-    InputStream inputStream = new ByteArrayInputStream(jsonStr.getBytes());
-    try {
-      return create(inputStream);
-    } finally {
-      inputStream.close();
-    }
-  }
-
-  public static BlurAnalyzer create(Path path) throws IOException {
-    FileSystem fileSystem = FileSystem.get(path.toUri(), new Configuration());
-    FSDataInputStream inputStream = fileSystem.open(path);
-    try {
-      return create(inputStream);
-    } finally {
-      inputStream.close();
-    }
-  }
-
   private static byte[] getBytes(InputStream inputStream) throws IOException {
     ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
     byte[] buffer = new byte[1024];
@@ -291,60 +174,6 @@ public final class BlurAnalyzer extends AnalyzerWrapper {
     return outputStream.toByteArray();
   }
 
-  private static void load(Map<String, Analyzer> analyzers, Map<String, ColumnFamilyDefinition> familyDefinitions, Map<String, Boolean> fullTextFields,
-      Map<String, Set<String>> subIndexNameLookups, Map<String, Store> storeMap, Map<String, Boolean> fullTextColumnFamilies, Map<String, TYPE> typeLookup) {
-    if (familyDefinitions != null) {
-      for (String family : familyDefinitions.keySet()) {
-        ColumnFamilyDefinition familyDefinition = familyDefinitions.get(family);
-        ColumnDefinition defaultDefinition = familyDefinition.getDefaultDefinition();
-        if (defaultDefinition != null) {
-          fullTextColumnFamilies.put(family, defaultDefinition.isFullTextIndex());
-        }
-        load(family, familyDefinition, analyzers, fullTextFields, subIndexNameLookups, storeMap, typeLookup);
-      }
-    }
-  }
-
-  private static void load(String family, ColumnFamilyDefinition familyDefinition, Map<String, Analyzer> analyzers, Map<String, Boolean> fullTextFields,
-      Map<String, Set<String>> subIndexNameLookups, Map<String, Store> storeMap, Map<String, TYPE> typeLookup) {
-    Map<String, ColumnDefinition> columnDefinitions = familyDefinition.getColumnDefinitions();
-    if (columnDefinitions != null) {
-      for (String column : columnDefinitions.keySet()) {
-        ColumnDefinition columnDefinition = columnDefinitions.get(column);
-        load(family, familyDefinition, column, columnDefinition, analyzers, fullTextFields, subIndexNameLookups, storeMap, typeLookup);
-      }
-    }
-  }
-
-  private static void load(String family, ColumnFamilyDefinition familyDefinition, String column, ColumnDefinition columnDefinition, Map<String, Analyzer> analyzers,
-      Map<String, Boolean> fullTextFields, Map<String, Set<String>> subIndexNameLookups, Map<String, Store> storeMap, Map<String, TYPE> typeLookup) {
-    Map<String, AlternateColumnDefinition> alternateColumnDefinitions = columnDefinition.getAlternateColumnDefinitions();
-    if (alternateColumnDefinitions != null) {
-      for (String subColumn : alternateColumnDefinitions.keySet()) {
-        AlternateColumnDefinition alternateColumnDefinition = alternateColumnDefinitions.get(subColumn);
-        load(family, familyDefinition, column, columnDefinition, subColumn, alternateColumnDefinition, analyzers, subIndexNameLookups, storeMap, typeLookup);
-      }
-    }
-    String fieldName = family + "." + column;
-    Analyzer analyzer = getAnalyzerByClassName(columnDefinition.getAnalyzerClassName(), aliases, fieldName, typeLookup);
-    analyzers.put(fieldName, analyzer);
-    if (columnDefinition.isFullTextIndex()) {
-      fullTextFields.put(fieldName, Boolean.TRUE);
-    } else {
-      fullTextFields.put(fieldName, Boolean.FALSE);
-    }
-  }
-
-  private static void load(String family, ColumnFamilyDefinition familyDefinition, String column, ColumnDefinition columnDefinition, String subColumn,
-      AlternateColumnDefinition alternateColumnDefinition, Map<String, Analyzer> analyzers, Map<String, Set<String>> subIndexNameLookups, Map<String, Store> storeMap,
-      Map<String, TYPE> typeLookup) {
-    String fieldName = family + "." + column + "." + subColumn;
-    Analyzer analyzer = getAnalyzerByClassName(alternateColumnDefinition.getAnalyzerClassName(), aliases, fieldName, typeLookup);
-    analyzers.put(fieldName, analyzer);
-    putStore(fieldName, Store.NO, storeMap);
-    addSubField(fieldName, subIndexNameLookups);
-  }
-
   private static void putStore(String name, Store store, Map<String, Store> storeMap) {
     storeMap.put(name, store);
   }

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/aee531c2/src/blur-core/src/main/java/org/apache/blur/lucene/search/ScoreType.java
----------------------------------------------------------------------
diff --git a/src/blur-core/src/main/java/org/apache/blur/lucene/search/ScoreType.java b/src/blur-core/src/main/java/org/apache/blur/lucene/search/ScoreType.java
new file mode 100644
index 0000000..d684726
--- /dev/null
+++ b/src/blur-core/src/main/java/org/apache/blur/lucene/search/ScoreType.java
@@ -0,0 +1,6 @@
+package org.apache.blur.lucene.search;
+
+public enum ScoreType {
+  AGGREGATE, BEST, CONSTANT, SUPER
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/aee531c2/src/blur-core/src/main/java/org/apache/blur/lucene/search/SuperParser.java
----------------------------------------------------------------------
diff --git a/src/blur-core/src/main/java/org/apache/blur/lucene/search/SuperParser.java b/src/blur-core/src/main/java/org/apache/blur/lucene/search/SuperParser.java
index b97124f..2a1b039 100644
--- a/src/blur-core/src/main/java/org/apache/blur/lucene/search/SuperParser.java
+++ b/src/blur-core/src/main/java/org/apache/blur/lucene/search/SuperParser.java
@@ -25,7 +25,6 @@ import java.util.UUID;
 
 import org.apache.blur.analysis.BlurAnalyzer;
 import org.apache.blur.analysis.BlurAnalyzer.TYPE;
-import org.apache.blur.thrift.generated.ScoreType;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.queryparser.classic.ParseException;
 import org.apache.lucene.queryparser.classic.QueryParser;

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/aee531c2/src/blur-core/src/main/java/org/apache/blur/lucene/search/SuperQuery.java
----------------------------------------------------------------------
diff --git a/src/blur-core/src/main/java/org/apache/blur/lucene/search/SuperQuery.java b/src/blur-core/src/main/java/org/apache/blur/lucene/search/SuperQuery.java
index 69bf4c6..ddd14ef 100644
--- a/src/blur-core/src/main/java/org/apache/blur/lucene/search/SuperQuery.java
+++ b/src/blur-core/src/main/java/org/apache/blur/lucene/search/SuperQuery.java
@@ -18,7 +18,6 @@ package org.apache.blur.lucene.search;
  */
 import java.io.IOException;
 
-import org.apache.blur.thrift.generated.ScoreType;
 import org.apache.blur.utils.PrimeDocCache;
 import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.IndexReader;

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/aee531c2/src/blur-core/src/main/java/org/apache/blur/manager/IndexManager.java
----------------------------------------------------------------------
diff --git a/src/blur-core/src/main/java/org/apache/blur/manager/IndexManager.java b/src/blur-core/src/main/java/org/apache/blur/manager/IndexManager.java
index 4ac059b..f83f596 100644
--- a/src/blur-core/src/main/java/org/apache/blur/manager/IndexManager.java
+++ b/src/blur-core/src/main/java/org/apache/blur/manager/IndexManager.java
@@ -24,8 +24,6 @@ import java.util.Collection;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
-import java.util.Set;
-import java.util.TreeMap;
 import java.util.TreeSet;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Future;
@@ -39,8 +37,6 @@ import org.apache.blur.metrics.BlurMetrics;
 import org.apache.blur.metrics.QueryMetrics;
 import org.apache.blur.thrift.BException;
 import org.apache.blur.thrift.generated.BlurException;
-import org.apache.blur.thrift.generated.Schema;
-import org.apache.blur.thrift.generated.ScoreType;
 import org.apache.blur.utils.BlurExecutorCompletionService;
 import org.apache.blur.utils.BlurUtil;
 import org.apache.blur.utils.ForkJoin;
@@ -50,9 +46,7 @@ import org.apache.hadoop.io.BytesWritable;
 import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.CorruptIndexException;
 import org.apache.lucene.index.FieldInfo;
-import org.apache.lucene.index.FieldInfos;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.MultiFields;
 import org.apache.lucene.index.StoredFieldVisitor;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.Terms;
@@ -111,14 +105,6 @@ public class IndexManager {
     return split[0];
   }
 
-  private ScoreType getScoreType(ScoreType type) {
-    if (type == null) {
-      return ScoreType.SUPER;
-    }
-    return type;
-  }
-
-
   private static String getRowId(IndexReader reader, int docId) throws CorruptIndexException, IOException {
     reader.document(docId, new StoredFieldVisitor() {
       @Override
@@ -244,36 +230,6 @@ public class IndexManager {
     return new Term(columnFamily + "." + columnName, value);
   }
 
-  public Schema schema(String table) throws IOException {
-    Schema schema = new Schema().setTable(table);
-    schema.columnFamilies = new TreeMap<String, Set<String>>();
-    Map<String, BlurIndex> blurIndexes = _indexServer.getIndexes(table);
-    for (BlurIndex blurIndex : blurIndexes.values()) {
-      IndexReader reader = blurIndex.getIndexReader();
-      try {
-        FieldInfos mergedFieldInfos = MultiFields.getMergedFieldInfos(reader);
-        for (FieldInfo fieldInfo : mergedFieldInfos) {
-          String fieldName = fieldInfo.name;
-          int index = fieldName.indexOf('.');
-          if (index > 0) {
-            String columnFamily = fieldName.substring(0, index);
-            String column = fieldName.substring(index + 1);
-            Set<String> set = schema.columnFamilies.get(columnFamily);
-            if (set == null) {
-              set = new TreeSet<String>();
-              schema.columnFamilies.put(columnFamily, set);
-            }
-            set.add(column);
-          }
-        }
-      } finally {
-        // this will allow for closing of index
-        reader.decRef();
-      }
-    }
-    return schema;
-  }
-
   public void setStatusCleanupTimerDelay(long delay) {
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/aee531c2/src/blur-core/src/main/java/org/apache/blur/manager/IndexServer.java
----------------------------------------------------------------------
diff --git a/src/blur-core/src/main/java/org/apache/blur/manager/IndexServer.java b/src/blur-core/src/main/java/org/apache/blur/manager/IndexServer.java
index 1097048..a0b2317 100644
--- a/src/blur-core/src/main/java/org/apache/blur/manager/IndexServer.java
+++ b/src/blur-core/src/main/java/org/apache/blur/manager/IndexServer.java
@@ -23,8 +23,6 @@ import java.util.SortedSet;
 
 import org.apache.blur.analysis.BlurAnalyzer;
 import org.apache.blur.manager.writer.BlurIndex;
-import org.apache.hadoop.io.compress.CompressionCodec;
-import org.apache.lucene.search.similarities.Similarity;
 
 
 public interface IndexServer {
@@ -68,15 +66,6 @@ public interface IndexServer {
   List<String> getShardList(String table);
 
   /**
-   * Gets the similarity object used by lucene for this table.
-   * 
-   * @param table
-   *          the table name.
-   * @return the similarity object.
-   */
-  Similarity getSimilarity(String table);
-
-  /**
    * Gets the status of the table.
    * 
    * @param table
@@ -119,24 +108,6 @@ public interface IndexServer {
    */
   int getShardCount(String table);
 
-  /**
-   * Gets the compress codec for the given table.
-   * 
-   * @param table
-   *          the name of the table.
-   * @return the {@link CompressionCodec}
-   */
-  CompressionCodec getCompressionCodec(String table);
-
-  /**
-   * Get the compression block size.
-   * 
-   * @param table
-   *          the name of the table.
-   * @return the block size.
-   */
-  int getCompressionBlockSize(String table);
-
   // Metrics
 
   /**

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/aee531c2/src/blur-core/src/main/java/org/apache/blur/manager/QueryParserUtil.java
----------------------------------------------------------------------
diff --git a/src/blur-core/src/main/java/org/apache/blur/manager/QueryParserUtil.java b/src/blur-core/src/main/java/org/apache/blur/manager/QueryParserUtil.java
deleted file mode 100644
index 469431a..0000000
--- a/src/blur-core/src/main/java/org/apache/blur/manager/QueryParserUtil.java
+++ /dev/null
@@ -1,64 +0,0 @@
-package org.apache.blur.manager;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-import static org.apache.blur.lucene.LuceneVersionConstant.LUCENE_VERSION;
-
-import org.apache.blur.analysis.BlurAnalyzer;
-import org.apache.blur.lucene.search.SuperParser;
-import org.apache.blur.thrift.generated.BlurException;
-import org.apache.blur.thrift.generated.ScoreType;
-import org.apache.lucene.queryparser.classic.ParseException;
-import org.apache.lucene.search.Filter;
-import org.apache.lucene.search.FilteredQuery;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.QueryWrapperFilter;
-
-public class QueryParserUtil {
-
-  public static Query parseQuery(String query, boolean superQueryOn, BlurAnalyzer analyzer, Filter postFilter, Filter preFilter, ScoreType scoreType) throws ParseException   {
-    Query result = new SuperParser(LUCENE_VERSION, analyzer, superQueryOn, preFilter, scoreType).parse(query);
-    if (postFilter == null) {
-      return result;
-    }
-    return new FilteredQuery(result, postFilter);
-  }
-
-  public static Filter parseFilter(String table, String filterStr, boolean superQueryOn, BlurAnalyzer analyzer, BlurFilterCache filterCache) throws ParseException, BlurException {
-    if (filterStr == null) {
-      return null;
-    }
-    synchronized (filterCache) {
-      Filter filter;
-      if (superQueryOn) {
-        filter = filterCache.fetchPostFilter(table, filterStr);
-      } else {
-        filter = filterCache.fetchPreFilter(table, filterStr);
-      }
-      if (filter != null) {
-        return filter;
-      }
-      filter = new QueryWrapperFilter(new SuperParser(LUCENE_VERSION, analyzer, superQueryOn, null, ScoreType.CONSTANT).parse(filterStr));
-      if (superQueryOn) {
-        filter = filterCache.storePostFilter(table, filterStr, filter);
-      } else {
-        filter = filterCache.storePreFilter(table, filterStr, filter);
-      }
-      return filter;
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/aee531c2/src/blur-core/src/main/java/org/apache/blur/manager/clusterstatus/ClusterStatus.java
----------------------------------------------------------------------
diff --git a/src/blur-core/src/main/java/org/apache/blur/manager/clusterstatus/ClusterStatus.java b/src/blur-core/src/main/java/org/apache/blur/manager/clusterstatus/ClusterStatus.java
index f14ef11..b5d48a0 100644
--- a/src/blur-core/src/main/java/org/apache/blur/manager/clusterstatus/ClusterStatus.java
+++ b/src/blur-core/src/main/java/org/apache/blur/manager/clusterstatus/ClusterStatus.java
@@ -18,62 +18,38 @@ package org.apache.blur.manager.clusterstatus;
  */
 import java.util.ArrayList;
 import java.util.List;
-import java.util.Set;
 
 import org.apache.blur.thrift.generated.TableDescriptor;
 
-
 public abstract class ClusterStatus {
 
-  public abstract List<String> getOnlineShardServers(boolean useCache, String cluster);
-
-  public abstract List<String> getControllerServerList();
-
-  public abstract List<String> getShardServerList(String cluster);
-
-  public abstract List<String> getClusterList(boolean useCache);
-
-  public abstract TableDescriptor getTableDescriptor(boolean useCache, String cluster, String table);
+  public abstract List<String> getOnlineServers(boolean useCache);
 
-  public final List<String> getTableList(boolean useCache) {
-    List<String> tables = new ArrayList<String>();
-    for (String cluster : getClusterList(useCache)) {
-      tables.addAll(getTableList(useCache, cluster));
-    }
-    return tables;
-  }
+  public abstract List<String> getServerList(boolean useCache);
 
-  public abstract String getCluster(boolean useCache, String table);
+  public abstract TableDescriptor getTableDescriptor(boolean useCache, String table);
 
-  public abstract boolean isEnabled(boolean useCache, String cluster, String table);
+  public abstract String getClusterName();
 
-  public abstract boolean exists(boolean useCache, String cluster, String table);
+  public abstract boolean exists(boolean useCache, String table);
 
-  public abstract boolean isInSafeMode(boolean useCache, String cluster);
+  public abstract boolean isInSafeMode(boolean useCache);
 
-  public List<String> getOfflineShardServers(boolean useCache, String cluster) {
-    List<String> shardServerList = new ArrayList<String>(getShardServerList(cluster));
-    shardServerList.removeAll(getOnlineShardServers(useCache, cluster));
+  public List<String> getOfflineServers(boolean useCache) {
+    List<String> shardServerList = new ArrayList<String>(getServerList(useCache));
+    shardServerList.removeAll(getOnlineServers(useCache));
     return shardServerList;
   }
 
-  public abstract int getShardCount(boolean useCache, String cluster, String table);
-
-  public abstract boolean isBlockCacheEnabled(String cluster, String table);
-
-  public abstract Set<String> getBlockCacheFileTypes(String cluster, String table);
-
-  public abstract List<String> getTableList(boolean useCache, String cluster);
-
-  public abstract boolean isReadOnly(boolean useCache, String cluster, String table);
+  public abstract List<String> getTableList(boolean useCache);
 
   public abstract void createTable(TableDescriptor tableDescriptor);
 
-  public abstract void disableTable(String cluster, String table);
+  public abstract void disableTable(String table);
 
-  public abstract void enableTable(String cluster, String table);
+  public abstract void enableTable(String table);
 
-  public abstract void removeTable(String cluster, String table, boolean deleteIndexFiles);
+  public abstract void removeTable(String table, boolean deleteIndexFiles);
 
   public abstract boolean isOpen();
 

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/aee531c2/src/blur-core/src/main/java/org/apache/blur/manager/clusterstatus/ZookeeperClusterStatus.java
----------------------------------------------------------------------
diff --git a/src/blur-core/src/main/java/org/apache/blur/manager/clusterstatus/ZookeeperClusterStatus.java b/src/blur-core/src/main/java/org/apache/blur/manager/clusterstatus/ZookeeperClusterStatus.java
index 0c595f4..6585e51 100644
--- a/src/blur-core/src/main/java/org/apache/blur/manager/clusterstatus/ZookeeperClusterStatus.java
+++ b/src/blur-core/src/main/java/org/apache/blur/manager/clusterstatus/ZookeeperClusterStatus.java
@@ -19,12 +19,10 @@ package org.apache.blur.manager.clusterstatus;
 import java.io.Closeable;
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Collection;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
-import java.util.Map.Entry;
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
@@ -34,9 +32,6 @@ import java.util.concurrent.atomic.AtomicReference;
 import org.apache.blur.analysis.BlurAnalyzer;
 import org.apache.blur.log.Log;
 import org.apache.blur.log.LogFactory;
-import org.apache.blur.lucene.search.FairSimilarity;
-import org.apache.blur.thrift.generated.AnalyzerDefinition;
-import org.apache.blur.thrift.generated.ColumnPreCache;
 import org.apache.blur.thrift.generated.TableDescriptor;
 import org.apache.blur.utils.BlurUtil;
 import org.apache.blur.zookeeper.WatchChildren;
@@ -44,13 +39,6 @@ import org.apache.blur.zookeeper.WatchChildren.OnChange;
 import org.apache.blur.zookeeper.WatchNodeData;
 import org.apache.blur.zookeeper.WatchNodeExistance;
 import org.apache.blur.zookeeper.ZkUtils;
-import org.apache.hadoop.io.compress.CompressionCodec;
-import org.apache.hadoop.io.compress.DefaultCodec;
-import org.apache.lucene.search.similarities.Similarity;
-import org.apache.thrift.TBase;
-import org.apache.thrift.TException;
-import org.apache.thrift.protocol.TJSONProtocol;
-import org.apache.thrift.transport.TMemoryInputTransport;
 import org.apache.zookeeper.CreateMode;
 import org.apache.zookeeper.KeeperException;
 import org.apache.zookeeper.WatchedEvent;
@@ -79,10 +67,12 @@ public class ZookeeperClusterStatus extends ClusterStatus {
   private ConcurrentMap<String, WatchNodeData> _safeModeDataWatchers = new ConcurrentHashMap<String, WatchNodeData>();
   private ConcurrentMap<String, WatchNodeExistance> _enabledWatchNodeExistance = new ConcurrentHashMap<String, WatchNodeExistance>();
   private ConcurrentMap<String, WatchNodeExistance> _readOnlyWatchNodeExistance = new ConcurrentHashMap<String, WatchNodeExistance>();
+  private String _cluster;
 
-  public ZookeeperClusterStatus(ZooKeeper zooKeeper) {
+  public ZookeeperClusterStatus(String cluster, ZooKeeper zooKeeper) {
     _zk = zooKeeper;
     _running.set(true);
+    _cluster = cluster;
     watchForClusters();
     try {
       Thread.sleep(1000);
@@ -214,8 +204,8 @@ public class ZookeeperClusterStatus extends ClusterStatus {
     _clusterWatcher = new WatchChildren(_zk, ZookeeperPathConstants.getClustersPath()).watch(new Clusters());
   }
 
-  public ZookeeperClusterStatus(String connectionStr) throws IOException {
-    this(new ZooKeeper(connectionStr, 30000, new Watcher() {
+  public ZookeeperClusterStatus(String cluster, String connectionStr) throws IOException {
+    this(cluster, new ZooKeeper(connectionStr, 30000, new Watcher() {
       @Override
       public void process(WatchedEvent event) {
 
@@ -227,25 +217,6 @@ public class ZookeeperClusterStatus extends ClusterStatus {
     return cluster + "." + table;
   }
 
-  @Override
-  public List<String> getClusterList(boolean useCache) {
-    if (useCache) {
-      return new ArrayList<String>(_clusters.get());
-    }
-    long s = System.nanoTime();
-    try {
-      checkIfOpen();
-      return _zk.getChildren(ZookeeperPathConstants.getClustersPath(), false);
-    } catch (KeeperException e) {
-      throw new RuntimeException(e);
-    } catch (InterruptedException e) {
-      throw new RuntimeException(e);
-    } finally {
-      long e = System.nanoTime();
-      LOG.debug("trace getClusterList [" + (e - s) / 1000000.0 + " ms]");
-    }
-  }
-
   private void checkIfOpen() {
     if (_running.get()) {
       return;
@@ -254,36 +225,20 @@ public class ZookeeperClusterStatus extends ClusterStatus {
   }
 
   @Override
-  public List<String> getControllerServerList() {
-    long s = System.nanoTime();
-    try {
-      checkIfOpen();
-      return _zk.getChildren(ZookeeperPathConstants.getOnlineControllersPath(), false);
-    } catch (KeeperException e) {
-      throw new RuntimeException(e);
-    } catch (InterruptedException e) {
-      throw new RuntimeException(e);
-    } finally {
-      long e = System.nanoTime();
-      LOG.debug("trace getControllerServerList [" + (e - s) / 1000000.0 + " ms]");
-    }
-  }
-
-  @Override
-  public List<String> getOnlineShardServers(boolean useCache, String cluster) {
+  public List<String> getOnlineServers(boolean useCache) {
     if (useCache) {
-      List<String> shards = _onlineShardsNodes.get(cluster);
+      List<String> shards = _onlineShardsNodes.get(_cluster);
       if (shards != null) {
         return shards;
       } else {
-        watchForOnlineShardNodes(cluster);
+        watchForOnlineShardNodes(_cluster);
       }
     }
 
     long s = System.nanoTime();
     try {
       checkIfOpen();
-      return _zk.getChildren(ZookeeperPathConstants.getClustersPath() + "/" + cluster + "/online/shard-nodes", false);
+      return _zk.getChildren(ZookeeperPathConstants.getClustersPath() + "/" + _cluster + "/online/shard-nodes", false);
     } catch (KeeperException e) {
       throw new RuntimeException(e);
     } catch (InterruptedException e) {
@@ -308,11 +263,11 @@ public class ZookeeperClusterStatus extends ClusterStatus {
   }
 
   @Override
-  public List<String> getShardServerList(String cluster) {
+  public List<String> getServerList(boolean useCache) {
     long s = System.nanoTime();
     try {
       checkIfOpen();
-      return _zk.getChildren(ZookeeperPathConstants.getClustersPath() + "/" + cluster + "/shard-nodes", false);
+      return _zk.getChildren(ZookeeperPathConstants.getClustersPath() + "/" + _cluster + "/shard-nodes", false);
     } catch (KeeperException e) {
       throw new RuntimeException(e);
     } catch (InterruptedException e) {
@@ -324,9 +279,9 @@ public class ZookeeperClusterStatus extends ClusterStatus {
   }
 
   @Override
-  public boolean exists(boolean useCache, String cluster, String table) {
+  public boolean exists(boolean useCache, String table) {
     if (useCache) {
-      Set<String> tables = _tablesPerCluster.get(cluster);
+      Set<String> tables = _tablesPerCluster.get(_cluster);
       if (tables != null) {
         if (tables.contains(table)) {
           return true;
@@ -336,7 +291,7 @@ public class ZookeeperClusterStatus extends ClusterStatus {
     long s = System.nanoTime();
     try {
       checkIfOpen();
-      if (_zk.exists(ZookeeperPathConstants.getTablePath(cluster, table), false) == null) {
+      if (_zk.exists(ZookeeperPathConstants.getTablePath(_cluster, table), false) == null) {
         return false;
       }
       return true;
@@ -350,39 +305,12 @@ public class ZookeeperClusterStatus extends ClusterStatus {
     }
   }
 
-  @Override
-  public boolean isEnabled(boolean useCache, String cluster, String table) {
-    if (useCache) {
-      Boolean e = _enabled.get(getClusterTableKey(cluster, table));
-      if (e != null) {
-        return e;
-      }
-    }
-    long s = System.nanoTime();
-    String tablePathIsEnabled = ZookeeperPathConstants.getTableEnabledPath(cluster, table);
-    try {
-      checkIfOpen();
-      if (_zk.exists(tablePathIsEnabled, false) == null) {
-        return false;
-      }
-    } catch (KeeperException e) {
-      throw new RuntimeException(e);
-    } catch (InterruptedException e) {
-      throw new RuntimeException(e);
-    } finally {
-      long e = System.nanoTime();
-      LOG.debug("trace isEnabled took [" + (e - s) / 1000000.0 + " ms]");
-    }
-    return true;
-  }
-
   private Map<String, TableDescriptor> _tableDescriptorCache = new ConcurrentHashMap<String, TableDescriptor>();
 
   @Override
-  public TableDescriptor getTableDescriptor(boolean useCache, String cluster, String table) {
+  public TableDescriptor getTableDescriptor(boolean useCache, String table) {
     if (useCache) {
       TableDescriptor tableDescriptor = _tableDescriptorCache.get(table);
-      updateReadOnlyAndEnabled(useCache, tableDescriptor, cluster, table);
       if (tableDescriptor != null) {
         return tableDescriptor;
       }
@@ -391,20 +319,9 @@ public class ZookeeperClusterStatus extends ClusterStatus {
     TableDescriptor tableDescriptor = new TableDescriptor();
     try {
       checkIfOpen();
-      tableDescriptor.shardCount = Integer.parseInt(new String(getData(ZookeeperPathConstants.getTableShardCountPath(cluster, table))));
-      tableDescriptor.tableUri = new String(getData(ZookeeperPathConstants.getTableUriPath(cluster, table)));
-      tableDescriptor.compressionClass = new String(getData(ZookeeperPathConstants.getTableCompressionCodecPath(cluster, table)));
-      tableDescriptor.compressionBlockSize = Integer.parseInt(new String(getData(ZookeeperPathConstants.getTableCompressionBlockSizePath(cluster, table))));
-      tableDescriptor.analyzerDefinition = fromBytes(getData(ZookeeperPathConstants.getTablePath(cluster, table)), AnalyzerDefinition.class);
-      tableDescriptor.blockCaching = isBlockCacheEnabled(cluster, table);
-      tableDescriptor.blockCachingFileTypes = getBlockCacheFileTypes(cluster, table);
-      tableDescriptor.name = table;
-      tableDescriptor.columnPreCache = fromBytes(getData(ZookeeperPathConstants.getTableColumnsToPreCache(cluster, table)), ColumnPreCache.class);
-      byte[] data = getData(ZookeeperPathConstants.getTableSimilarityPath(cluster, table));
-      if (data != null) {
-        tableDescriptor.similarityClass = new String(data);
-      }
-      updateReadOnlyAndEnabled(useCache, tableDescriptor, cluster, table);
+      String blurTablePath = ZookeeperPathConstants.getTablePath(getClusterName(), table);
+      byte[] data = getData(blurTablePath);
+      BlurUtil.write(data, tableDescriptor);
     } catch (KeeperException e) {
       throw new RuntimeException(e);
     } catch (InterruptedException e) {
@@ -413,39 +330,10 @@ public class ZookeeperClusterStatus extends ClusterStatus {
       long e = System.nanoTime();
       LOG.debug("trace getTableDescriptor took [" + (e - s) / 1000000.0 + " ms]");
     }
-    tableDescriptor.cluster = cluster;
     _tableDescriptorCache.put(table, tableDescriptor);
     return tableDescriptor;
   }
 
-  private void updateReadOnlyAndEnabled(boolean useCache, TableDescriptor tableDescriptor, String cluster, String table) {
-    if (tableDescriptor != null) {
-      tableDescriptor.setReadOnly(isReadOnly(useCache, cluster, table));
-      tableDescriptor.setIsEnabled(isEnabled(useCache, cluster, table));
-    }
-  }
-
-  @SuppressWarnings("unchecked")
-  private <T extends TBase<?, ?>> T fromBytes(byte[] data, Class<T> clazz) {
-    try {
-      if (data == null) {
-        return null;
-      }
-      TBase<?, ?> base = clazz.newInstance();
-      TMemoryInputTransport trans = new TMemoryInputTransport(data);
-      TJSONProtocol protocol = new TJSONProtocol(trans);
-      base.read(protocol);
-      trans.close();
-      return (T) base;
-    } catch (InstantiationException e) {
-      throw new RuntimeException(e);
-    } catch (IllegalAccessException e) {
-      throw new RuntimeException(e);
-    } catch (TException e) {
-      throw new RuntimeException(e);
-    }
-  }
-
   private byte[] getData(String path) throws KeeperException, InterruptedException {
     Stat stat = _zk.exists(path, false);
     if (stat == null) {
@@ -455,9 +343,9 @@ public class ZookeeperClusterStatus extends ClusterStatus {
   }
 
   @Override
-  public List<String> getTableList(boolean useCache, String cluster) {
+  public List<String> getTableList(boolean useCache) {
     if (useCache) {
-      Set<String> tables = _tablesPerCluster.get(cluster);
+      Set<String> tables = _tablesPerCluster.get(_cluster);
       if (tables != null) {
         return new ArrayList<String>(tables);
       }
@@ -465,7 +353,7 @@ public class ZookeeperClusterStatus extends ClusterStatus {
     long s = System.nanoTime();
     try {
       checkIfOpen();
-      return _zk.getChildren(ZookeeperPathConstants.getTablesPath(cluster), false);
+      return _zk.getChildren(ZookeeperPathConstants.getTablesPath(_cluster), false);
     } catch (KeeperException e) {
       throw new RuntimeException(e);
     } catch (InterruptedException e) {
@@ -505,40 +393,9 @@ public class ZookeeperClusterStatus extends ClusterStatus {
   }
 
   @Override
-  public String getCluster(boolean useCache, String table) {
-    if (useCache) {
-      for (Entry<String, Set<String>> entry : _tablesPerCluster.entrySet()) {
-        if (entry.getValue().contains(table)) {
-          return entry.getKey();
-        }
-      }
-    }
-    List<String> clusterList = getClusterList(useCache);
-    for (String cluster : clusterList) {
-      long s = System.nanoTime();
-      try {
-        checkIfOpen();
-        Stat stat = _zk.exists(ZookeeperPathConstants.getTablePath(cluster, table), false);
-        if (stat != null) {
-          // _tableToClusterCache.put(table, cluster);
-          return cluster;
-        }
-      } catch (KeeperException e) {
-        throw new RuntimeException(e);
-      } catch (InterruptedException e) {
-        throw new RuntimeException(e);
-      } finally {
-        long e = System.nanoTime();
-        LOG.debug("trace getCluster took [" + (e - s) / 1000000.0 + " ms]");
-      }
-    }
-    return null;
-  }
-
-  @Override
-  public boolean isInSafeMode(boolean useCache, String cluster) {
+  public boolean isInSafeMode(boolean useCache) {
     if (useCache) {
-      Long safeModeTimestamp = _safeModeMap.get(cluster);
+      Long safeModeTimestamp = _safeModeMap.get(_cluster);
       if (safeModeTimestamp != null && safeModeTimestamp != Long.MIN_VALUE) {
         return safeModeTimestamp < System.currentTimeMillis() ? false : true;
       }
@@ -546,7 +403,7 @@ public class ZookeeperClusterStatus extends ClusterStatus {
     long s = System.nanoTime();
     try {
       checkIfOpen();
-      String blurSafemodePath = ZookeeperPathConstants.getSafemodePath(cluster);
+      String blurSafemodePath = ZookeeperPathConstants.getSafemodePath(_cluster);
       Stat stat = _zk.exists(blurSafemodePath, false);
       if (stat == null) {
         return false;
@@ -572,146 +429,21 @@ public class ZookeeperClusterStatus extends ClusterStatus {
   }
 
   @Override
-  public int getShardCount(boolean useCache, String cluster, String table) {
-    if (useCache) {
-      TableDescriptor tableDescriptor = getTableDescriptor(true, cluster, table);
-      return tableDescriptor.shardCount;
-    }
-    long s = System.nanoTime();
-    try {
-      return Integer.parseInt(new String(getData(ZookeeperPathConstants.getTableShardCountPath(cluster, table))));
-    } catch (NumberFormatException e) {
-      throw new RuntimeException(e);
-    } catch (KeeperException e) {
-      throw new RuntimeException(e);
-    } catch (InterruptedException e) {
-      throw new RuntimeException(e);
-    } finally {
-      long e = System.nanoTime();
-      LOG.debug("trace getShardCount took [" + (e - s) / 1000000.0 + " ms]");
-    }
-  }
-
-  @Override
-  public Set<String> getBlockCacheFileTypes(String cluster, String table) {
-    long s = System.nanoTime();
-    try {
-      byte[] data = getData(ZookeeperPathConstants.getTableBlockCachingFileTypesPath(cluster, table));
-      if (data == null) {
-        return null;
-      }
-      String str = new String(data);
-      if (str.isEmpty()) {
-        return null;
-      }
-      Set<String> types = new HashSet<String>(Arrays.asList(str.split(",")));
-      if (types.isEmpty()) {
-        return null;
-      }
-      return types;
-    } catch (KeeperException e) {
-      throw new RuntimeException(e);
-    } catch (InterruptedException e) {
-      throw new RuntimeException(e);
-    } finally {
-      long e = System.nanoTime();
-      LOG.debug("trace getBlockCacheFileTypes took [" + (e - s) / 1000000.0 + " ms]");
-    }
-  }
-
-  @Override
-  public boolean isBlockCacheEnabled(String cluster, String table) {
-    long s = System.nanoTime();
-    try {
-      checkIfOpen();
-      if (_zk.exists(ZookeeperPathConstants.getTableBlockCachingFileTypesPath(cluster, table), false) == null) {
-        return false;
-      }
-    } catch (KeeperException e) {
-      throw new RuntimeException(e);
-    } catch (InterruptedException e) {
-      throw new RuntimeException(e);
-    } finally {
-      long e = System.nanoTime();
-      LOG.debug("trace isBlockCacheEnabled took [" + (e - s) / 1000000.0 + " ms]");
-    }
-    return true;
-  }
-
-  @Override
-  public boolean isReadOnly(boolean useCache, String cluster, String table) {
-    if (useCache) {
-      Boolean ro = _readOnly.get(getClusterTableKey(cluster, table));
-      if (ro != null) {
-        return ro;
-      }
-    }
-    long s = System.nanoTime();
-    String path = ZookeeperPathConstants.getTableReadOnlyPath(cluster, table);
-    try {
-      checkIfOpen();
-      if (_zk.exists(path, false) == null) {
-        return false;
-      }
-      return true;
-    } catch (KeeperException e) {
-      throw new RuntimeException(e);
-    } catch (InterruptedException e) {
-      throw new RuntimeException(e);
-    } finally {
-      long e = System.nanoTime();
-      LOG.debug("trace isReadOnly took [" + (e - s) / 1000000.0 + " ms]");
-    }
-  }
-
-  @Override
   public void createTable(TableDescriptor tableDescriptor) {
     long s = System.nanoTime();
     try {
       checkIfOpen();
-      if (tableDescriptor.getCompressionClass() == null) {
-        tableDescriptor.setCompressionClass(DefaultCodec.class.getName());
-      }
-      if (tableDescriptor.getSimilarityClass() == null) {
-        tableDescriptor.setSimilarityClass(FairSimilarity.class.getName());
-      }
-      if (tableDescriptor.getAnalyzerDefinition() == null) {
-        tableDescriptor.setAnalyzerDefinition(new AnalyzerDefinition());
-      }
-      String table = BlurUtil.nullCheck(tableDescriptor.name, "tableDescriptor.name cannot be null.");
-      String cluster = BlurUtil.nullCheck(tableDescriptor.cluster, "tableDescriptor.cluster cannot be null.");
-      BlurAnalyzer analyzer = new BlurAnalyzer(BlurUtil.nullCheck(tableDescriptor.analyzerDefinition, "tableDescriptor.analyzerDefinition cannot be null."));
-      String uri = BlurUtil.nullCheck(tableDescriptor.tableUri, "tableDescriptor.tableUri cannot be null.");
+      String table = BlurUtil.nullCheck(tableDescriptor.getName(), "tableDescriptor.name cannot be null.");
+      BlurAnalyzer analyzer = new BlurAnalyzer(BlurUtil.nullCheck(tableDescriptor.getAnalyzer(), "tableDescriptor.analyzerDefinition cannot be null."));
+      String uri = BlurUtil.nullCheck(tableDescriptor.getStoragePath(), "tableDescriptor.tableUri cannot be null.");
       int shardCount = BlurUtil.zeroCheck(tableDescriptor.shardCount, "tableDescriptor.shardCount cannot be less than 1");
-      CompressionCodec compressionCodec = BlurUtil.getInstance(tableDescriptor.compressionClass, CompressionCodec.class);
-      // @TODO check block size
-      int compressionBlockSize = tableDescriptor.compressionBlockSize;
-      Similarity similarity = BlurUtil.getInstance(tableDescriptor.similarityClass, Similarity.class);
-      boolean blockCaching = tableDescriptor.blockCaching;
-      Set<String> blockCachingFileTypes = tableDescriptor.blockCachingFileTypes;
-      String blurTablePath = ZookeeperPathConstants.getTablePath(cluster, table);
-      ColumnPreCache columnPreCache = tableDescriptor.columnPreCache;
+      String blurTablePath = ZookeeperPathConstants.getTablePath(getClusterName(), table);
 
       if (_zk.exists(blurTablePath, false) != null) {
         throw new IOException("Table [" + table + "] already exists.");
       }
       BlurUtil.setupFileSystem(uri, shardCount);
-      BlurUtil.createPath(_zk, blurTablePath, analyzer.toJSON().getBytes());
-      BlurUtil.createPath(_zk, ZookeeperPathConstants.getTableColumnsToPreCache(cluster, table), BlurUtil.read(columnPreCache));
-      BlurUtil.createPath(_zk, ZookeeperPathConstants.getTableUriPath(cluster, table), uri.getBytes());
-      BlurUtil.createPath(_zk, ZookeeperPathConstants.getTableShardCountPath(cluster, table), Integer.toString(shardCount).getBytes());
-      BlurUtil.createPath(_zk, ZookeeperPathConstants.getTableCompressionCodecPath(cluster, table), compressionCodec.getClass().getName().getBytes());
-      BlurUtil.createPath(_zk, ZookeeperPathConstants.getTableCompressionBlockSizePath(cluster, table), Integer.toString(compressionBlockSize).getBytes());
-      BlurUtil.createPath(_zk, ZookeeperPathConstants.getTableSimilarityPath(cluster, table), similarity.getClass().getName().getBytes());
-      BlurUtil.createPath(_zk, ZookeeperPathConstants.getLockPath(cluster, table), null);
-      BlurUtil.createPath(_zk, ZookeeperPathConstants.getTableFieldNamesPath(cluster, table), null);
-      if (tableDescriptor.readOnly) {
-        BlurUtil.createPath(_zk, ZookeeperPathConstants.getTableReadOnlyPath(cluster, table), null);
-      }
-      if (blockCaching) {
-        BlurUtil.createPath(_zk, ZookeeperPathConstants.getTableBlockCachingPath(cluster, table), null);
-      }
-      BlurUtil.createPath(_zk, ZookeeperPathConstants.getTableBlockCachingFileTypesPath(cluster, table), toBytes(blockCachingFileTypes));
+      BlurUtil.createPath(_zk, blurTablePath, BlurUtil.read(tableDescriptor));
     } catch (IOException e) {
       throw new RuntimeException(e);
     } catch (KeeperException e) {
@@ -725,14 +457,14 @@ public class ZookeeperClusterStatus extends ClusterStatus {
   }
 
   @Override
-  public void disableTable(String cluster, String table) {
+  public void disableTable(String table) {
     long s = System.nanoTime();
     try {
       checkIfOpen();
-      if (_zk.exists(ZookeeperPathConstants.getTablePath(cluster, table), false) == null) {
+      if (_zk.exists(ZookeeperPathConstants.getTablePath(_cluster, table), false) == null) {
         throw new IOException("Table [" + table + "] does not exist.");
       }
-      String blurTableEnabledPath = ZookeeperPathConstants.getTableEnabledPath(cluster, table);
+      String blurTableEnabledPath = ZookeeperPathConstants.getTableEnabledPath(_cluster, table);
       if (_zk.exists(blurTableEnabledPath, false) == null) {
         throw new IOException("Table [" + table + "] already disabled.");
       }
@@ -750,14 +482,14 @@ public class ZookeeperClusterStatus extends ClusterStatus {
   }
 
   @Override
-  public void enableTable(String cluster, String table) {
+  public void enableTable(String table) {
     long s = System.nanoTime();
     try {
       checkIfOpen();
-      if (_zk.exists(ZookeeperPathConstants.getTablePath(cluster, table), false) == null) {
+      if (_zk.exists(ZookeeperPathConstants.getTablePath(_cluster, table), false) == null) {
         throw new IOException("Table [" + table + "] does not exist.");
       }
-      String blurTableEnabledPath = ZookeeperPathConstants.getTableEnabledPath(cluster, table);
+      String blurTableEnabledPath = ZookeeperPathConstants.getTableEnabledPath(_cluster, table);
       if (_zk.exists(blurTableEnabledPath, false) != null) {
         throw new IOException("Table [" + table + "] already enabled.");
       }
@@ -775,18 +507,18 @@ public class ZookeeperClusterStatus extends ClusterStatus {
   }
 
   @Override
-  public void removeTable(String cluster, String table, boolean deleteIndexFiles) {
+  public void removeTable(String table, boolean deleteIndexFiles) {
     long s = System.nanoTime();
     try {
       checkIfOpen();
-      String blurTablePath = ZookeeperPathConstants.getTablePath(cluster, table);
+      String blurTablePath = ZookeeperPathConstants.getTablePath(_cluster, table);
       if (_zk.exists(blurTablePath, false) == null) {
         throw new IOException("Table [" + table + "] does not exist.");
       }
-      if (_zk.exists(ZookeeperPathConstants.getTableEnabledPath(cluster, table), false) != null) {
+      if (_zk.exists(ZookeeperPathConstants.getTableEnabledPath(_cluster, table), false) != null) {
         throw new IOException("Table [" + table + "] must be disabled before it can be removed.");
       }
-      byte[] data = getData(ZookeeperPathConstants.getTableUriPath(cluster, table));
+      byte[] data = getData(ZookeeperPathConstants.getTableUriPath(_cluster, table));
       String uri = new String(data);
       BlurUtil.removeAll(_zk, blurTablePath);
       if (deleteIndexFiles) {
@@ -804,19 +536,13 @@ public class ZookeeperClusterStatus extends ClusterStatus {
     }
   }
 
-  private static byte[] toBytes(Set<String> blockCachingFileTypes) {
-    if (blockCachingFileTypes == null || blockCachingFileTypes.isEmpty()) {
-      return null;
-    }
-    StringBuilder builder = new StringBuilder();
-    for (String type : blockCachingFileTypes) {
-      builder.append(type).append(',');
-    }
-    return builder.substring(0, builder.length() - 1).getBytes();
-  }
-
   @Override
   public boolean isOpen() {
     return _running.get();
   }
+
+  @Override
+  public String getClusterName() {
+    return _cluster;
+  }
 }

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/aee531c2/src/blur-core/src/main/java/org/apache/blur/manager/indexserver/DistributedIndexServer.java
----------------------------------------------------------------------
diff --git a/src/blur-core/src/main/java/org/apache/blur/manager/indexserver/DistributedIndexServer.java b/src/blur-core/src/main/java/org/apache/blur/manager/indexserver/DistributedIndexServer.java
index 6d0c8ea..c59e93a 100644
--- a/src/blur-core/src/main/java/org/apache/blur/manager/indexserver/DistributedIndexServer.java
+++ b/src/blur-core/src/main/java/org/apache/blur/manager/indexserver/DistributedIndexServer.java
@@ -42,7 +42,6 @@ import org.apache.blur.analysis.BlurAnalyzer;
 import org.apache.blur.concurrent.Executors;
 import org.apache.blur.log.Log;
 import org.apache.blur.log.LogFactory;
-import org.apache.blur.lucene.search.FairSimilarity;
 import org.apache.blur.lucene.store.refcounter.DirectoryReferenceFileGC;
 import org.apache.blur.manager.BlurFilterCache;
 import org.apache.blur.manager.clusterstatus.ClusterStatus;
@@ -62,13 +61,11 @@ import org.apache.blur.utils.BlurConstants;
 import org.apache.blur.utils.BlurUtil;
 import org.apache.blur.zookeeper.WatchChildren;
 import org.apache.blur.zookeeper.WatchChildren.OnChange;
-import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.compress.CompressionCodec;
 import org.apache.lucene.index.IndexDeletionPolicy;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.search.similarities.Similarity;
@@ -239,7 +236,7 @@ public class DistributedIndexServer extends AbstractIndexServer {
 
       private void warmup() {
         if (_running.get()) {
-          List<String> tableList = _clusterStatus.getTableList(false, _cluster);
+          List<String> tableList = _clusterStatus.getTableList(false);
           _blurMetrics.tableCount.set(tableList.size());
           long indexCount = 0;
           AtomicLong segmentCount = new AtomicLong();
@@ -366,12 +363,13 @@ public class DistributedIndexServer extends AbstractIndexServer {
     List<String> tables = new ArrayList<String>(map.keySet());
     Map<String, T> removed = new HashMap<String, T>();
     for (String table : tables) {
-      if (!_clusterStatus.exists(true, _cluster, table)) {
+      if (!_clusterStatus.exists(true, table)) {
         removed.put(table, map.remove(table));
       }
     }
     for (String table : tables) {
-      if (!_clusterStatus.isEnabled(true, _cluster, table)) {
+      TableDescriptor tableDescriptor = _clusterStatus.getTableDescriptor(true, table);
+      if (!tableDescriptor.isEnabled()) {
         removed.put(table, map.remove(table));
       }
     }
@@ -414,28 +412,14 @@ public class DistributedIndexServer extends AbstractIndexServer {
     checkTable(table);
     BlurAnalyzer blurAnalyzer = _tableAnalyzers.get(table);
     if (blurAnalyzer == null) {
-      TableDescriptor descriptor = getTableDescriptor(table);
-      blurAnalyzer = new BlurAnalyzer(descriptor.analyzerDefinition);
+      TableDescriptor tableDescriptor = _clusterStatus.getTableDescriptor(true, table);
+      blurAnalyzer = new BlurAnalyzer(tableDescriptor.getAnalyzer());
       _tableAnalyzers.put(table, blurAnalyzer);
     }
     return blurAnalyzer;
   }
 
   @Override
-  public int getCompressionBlockSize(String table) {
-    checkTable(table);
-    TableDescriptor descriptor = getTableDescriptor(table);
-    return descriptor.compressionBlockSize;
-  }
-
-  @Override
-  public CompressionCodec getCompressionCodec(String table) {
-    checkTable(table);
-    TableDescriptor descriptor = getTableDescriptor(table);
-    return getInstance(descriptor.compressionClass, CompressionCodec.class);
-  }
-
-  @Override
   public SortedSet<String> getShardListCurrentServerOnly(String table) throws IOException {
     return new TreeSet<String>(getShardsToServe(table));
   }
@@ -466,7 +450,8 @@ public class DistributedIndexServer extends AbstractIndexServer {
 
   private BlurIndex openShard(String table, String shard) throws IOException {
     LOG.info("Opening shard [{0}] for table [{1}]", shard, table);
-    Path tablePath = new Path(getTableDescriptor(table).tableUri);
+    TableDescriptor tableDescriptor = _clusterStatus.getTableDescriptor(true, table);
+    Path tablePath = new Path(tableDescriptor.getStoragePath());
     Path walTablePath = new Path(tablePath, LOGS);
     Path hdfsDirPath = new Path(tablePath, shard);
 
@@ -474,33 +459,10 @@ public class DistributedIndexServer extends AbstractIndexServer {
 
     Directory directory = new HdfsDirectory(_configuration, hdfsDirPath);
     directory.setLockFactory(lockFactory);
-
-    TableDescriptor descriptor = _clusterStatus.getTableDescriptor(true, _cluster, table);
-    String compressionClass = descriptor.compressionClass;
-    int compressionBlockSize = descriptor.compressionBlockSize;
-    if (compressionClass != null) {
-//      throw new RuntimeException("Not supported yet");
-      LOG.error("Not supported yet");
-//      CompressionCodec compressionCodec;
-//      try {
-//        compressionCodec = BlurUtil.getInstance(compressionClass, CompressionCodec.class);
-//        directory = new CompressedFieldDataDirectory(directory, compressionCodec, compressionBlockSize);
-//      } catch (Exception e) {
-//        throw new IOException(e);
-//      }
-    }
-
-    Directory dir;
-    boolean blockCacheEnabled = _clusterStatus.isBlockCacheEnabled(_cluster, table);
-    if (blockCacheEnabled) {
-      Set<String> blockCacheFileTypes = _clusterStatus.getBlockCacheFileTypes(_cluster, table);
-      dir = new BlockDirectory(table + "_" + shard, directory, _cache, blockCacheFileTypes);
-    } else {
-      dir = directory;
-    }
+    Directory dir = new BlockDirectory(table + "_" + shard, directory, _cache);
 
     BlurIndex index;
-    if (_clusterStatus.isReadOnly(true, _cluster, table)) {
+    if (tableDescriptor.isReadOnly()) {
       BlurIndexReader reader = new BlurIndexReader();
       reader.setCloser(_closer);
       reader.setAnalyzer(getAnalyzer(table));
@@ -509,7 +471,6 @@ public class DistributedIndexServer extends AbstractIndexServer {
       reader.setShard(shard);
       reader.setTable(table);
       reader.setIndexDeletionPolicy(_indexDeletionPolicy);
-      reader.setSimilarity(getSimilarity(table));
       reader.init();
       index = reader;
     } else {
@@ -518,7 +479,6 @@ public class DistributedIndexServer extends AbstractIndexServer {
       writer.setDirectory(dir);
       writer.setShard(shard);
       writer.setTable(table);
-      writer.setSimilarity(getSimilarity(table));
       writer.setTimeBetweenCommits(_timeBetweenCommits);
       writer.setTimeBetweenRefreshs(_timeBetweenRefreshs);
       writer.setWalPath(walTablePath);
@@ -530,7 +490,6 @@ public class DistributedIndexServer extends AbstractIndexServer {
       index = writer;
     }
     _filterCache.opening(table, shard, index);
-    TableDescriptor tableDescriptor = _clusterStatus.getTableDescriptor(true, _cluster, table);
     return warmUp(index, tableDescriptor, shard);
   }
 
@@ -640,13 +599,13 @@ public class DistributedIndexServer extends AbstractIndexServer {
   private synchronized Set<String> setupLayoutManager(String table) {
     DistributedLayoutManager layoutManager = new DistributedLayoutManager();
 
-    String cluster = _clusterStatus.getCluster(false, table);
+    String cluster = _clusterStatus.getClusterName();
     if (cluster == null) {
       throw new RuntimeException("Table [" + table + "] is not found.");
     }
 
-    List<String> shardServerList = _clusterStatus.getShardServerList(cluster);
-    List<String> offlineShardServers = new ArrayList<String>(_clusterStatus.getOfflineShardServers(false, cluster));
+    List<String> shardServerList = _clusterStatus.getServerList(false);
+    List<String> offlineShardServers = new ArrayList<String>(_clusterStatus.getOfflineServers(false));
     List<String> shardList = getShardList(table);
 
     layoutManager.setNodes(shardServerList);
@@ -675,8 +634,8 @@ public class DistributedIndexServer extends AbstractIndexServer {
   @Override
   public int getShardCount(String table) {
     checkTable(table);
-    TableDescriptor descriptor = getTableDescriptor(table);
-    return descriptor.shardCount;
+    TableDescriptor tableDescriptor = _clusterStatus.getTableDescriptor(true, table);
+    return tableDescriptor.getShardCount();
   }
 
   @Override
@@ -684,8 +643,8 @@ public class DistributedIndexServer extends AbstractIndexServer {
     checkTable(table);
     List<String> result = new ArrayList<String>();
     try {
-      TableDescriptor descriptor = getTableDescriptor(table);
-      Path tablePath = new Path(descriptor.tableUri);
+      TableDescriptor tableDescriptor = _clusterStatus.getTableDescriptor(true, table);
+      Path tablePath = new Path(tableDescriptor.getStoragePath());
       FileSystem fileSystem = FileSystem.get(tablePath.toUri(), _configuration);
       if (!fileSystem.exists(tablePath)) {
         LOG.error("Table [{0}] is missing, defined location [{1}]", table, tablePath.toUri());
@@ -707,23 +666,6 @@ public class DistributedIndexServer extends AbstractIndexServer {
   }
 
   @Override
-  public Similarity getSimilarity(String table) {
-    checkTable(table);
-    Similarity similarity = _tableSimilarity.get(table);
-    if (similarity == null) {
-      TableDescriptor tableDescriptor = _clusterStatus.getTableDescriptor(true, _cluster, table);
-      String similarityClass = tableDescriptor.similarityClass;
-      if (similarityClass == null) {
-        similarity = new FairSimilarity();
-      } else {
-        similarity = getInstance(similarityClass, Similarity.class);
-      }
-      _tableSimilarity.put(table, similarity);
-    }
-    return similarity;
-  }
-
-  @Override
   public long getTableSize(String table) throws IOException {
     checkTable(table);
     Path tablePath = new Path(getTableUri(table));
@@ -735,15 +677,15 @@ public class DistributedIndexServer extends AbstractIndexServer {
   @Override
   public TABLE_STATUS getTableStatus(String table) {
     checkTable(table);
-    boolean enabled = _clusterStatus.isEnabled(true, _cluster, table);
-    if (enabled) {
+    TableDescriptor tableDescriptor = _clusterStatus.getTableDescriptor(true, table);
+    if (tableDescriptor.isEnabled()) {
       return TABLE_STATUS.ENABLED;
     }
     return TABLE_STATUS.DISABLED;
   }
 
   private void checkTable(String table) {
-    if (_clusterStatus.exists(true, _cluster, table)) {
+    if (_clusterStatus.exists(true, table)) {
       return;
     }
     throw new RuntimeException("Table [" + table + "] does not exist.");
@@ -752,32 +694,8 @@ public class DistributedIndexServer extends AbstractIndexServer {
   @Override
   public String getTableUri(String table) {
     checkTable(table);
-    TableDescriptor descriptor = getTableDescriptor(table);
-    return descriptor.tableUri;
-  }
-
-  private TableDescriptor getTableDescriptor(String table) {
-    TableDescriptor tableDescriptor = _tableDescriptors.get(table);
-    if (tableDescriptor == null) {
-      tableDescriptor = _clusterStatus.getTableDescriptor(true, _cluster, table);
-      _tableDescriptors.put(table, tableDescriptor);
-    }
-    return tableDescriptor;
-  }
-
-  @SuppressWarnings("unchecked")
-  private <T> T getInstance(String className, Class<T> c) {
-    try {
-      Class<? extends T> clazz = (Class<? extends T>) Class.forName(className);
-      Object object = clazz.newInstance();
-      if (object instanceof Configurable) {
-        Configurable configurable = (Configurable) object;
-        configurable.setConf(_configuration);
-      }
-      return (T) object;
-    } catch (Exception e) {
-      throw new RuntimeException(e);
-    }
+    TableDescriptor tableDescriptor = _clusterStatus.getTableDescriptor(true, table);
+    return tableDescriptor.getStoragePath();
   }
 
   public void setClusterStatus(ClusterStatus clusterStatus) {

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/aee531c2/src/blur-core/src/main/java/org/apache/blur/manager/indexserver/LocalIndexServer.java
----------------------------------------------------------------------
diff --git a/src/blur-core/src/main/java/org/apache/blur/manager/indexserver/LocalIndexServer.java b/src/blur-core/src/main/java/org/apache/blur/manager/indexserver/LocalIndexServer.java
index 041eb2a..fd8335d 100644
--- a/src/blur-core/src/main/java/org/apache/blur/manager/indexserver/LocalIndexServer.java
+++ b/src/blur-core/src/main/java/org/apache/blur/manager/indexserver/LocalIndexServer.java
@@ -34,14 +34,11 @@ import java.util.concurrent.ConcurrentHashMap;
 import org.apache.blur.analysis.BlurAnalyzer;
 import org.apache.blur.log.Log;
 import org.apache.blur.log.LogFactory;
-import org.apache.blur.lucene.search.FairSimilarity;
 import org.apache.blur.manager.writer.BlurIndex;
 import org.apache.blur.manager.writer.BlurIndexCloser;
 import org.apache.blur.manager.writer.BlurNRTIndex;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.compress.CompressionCodec;
-import org.apache.hadoop.io.compress.DefaultCodec;
 import org.apache.lucene.analysis.core.KeywordAnalyzer;
 import org.apache.lucene.analysis.standard.StandardAnalyzer;
 import org.apache.lucene.analysis.util.CharArraySet;
@@ -49,7 +46,6 @@ import org.apache.lucene.index.CorruptIndexException;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
-import org.apache.lucene.search.similarities.Similarity;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.MMapDirectory;
 
@@ -60,8 +56,6 @@ public class LocalIndexServer extends AbstractIndexServer {
   private Map<String, Map<String, BlurIndex>> _readersMap = new ConcurrentHashMap<String, Map<String, BlurIndex>>();
   private File _localDir;
   private BlurIndexCloser _closer;
-  private int _blockSize = 65536;
-  private CompressionCodec _compression = new DefaultCodec();
   private Path _walPath;
   private Configuration _configuration = new Configuration();
 
@@ -101,11 +95,6 @@ public class LocalIndexServer extends AbstractIndexServer {
   }
 
   @Override
-  public Similarity getSimilarity(String table) {
-    return new FairSimilarity();
-  }
-
-  @Override
   public void close() {
     _closer.close();
     for (String table : _readersMap.keySet()) {
@@ -147,7 +136,6 @@ public class LocalIndexServer extends AbstractIndexServer {
     index.setAnalyzer(getAnalyzer(table));
     index.setDirectory(dir);
     index.setShard(shard);
-    index.setSimilarity(getSimilarity(table));
     index.setTable(table);
     index.setWalPath(new Path(new Path(_walPath, table), shard));
     index.setConfiguration(_configuration);
@@ -196,16 +184,6 @@ public class LocalIndexServer extends AbstractIndexServer {
   }
 
   @Override
-  public int getCompressionBlockSize(String table) {
-    return _blockSize;
-  }
-
-  @Override
-  public CompressionCodec getCompressionCodec(String table) {
-    return _compression;
-  }
-
-  @Override
   public long getTableSize(String table) throws IOException {
     try {
       File file = new File(new URI(getTableUri(table)));

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/aee531c2/src/blur-core/src/main/java/org/apache/blur/manager/stats/MergerTableStats.java
----------------------------------------------------------------------
diff --git a/src/blur-core/src/main/java/org/apache/blur/manager/stats/MergerTableStats.java b/src/blur-core/src/main/java/org/apache/blur/manager/stats/MergerTableStats.java
index 99771d2..56189f4 100644
--- a/src/blur-core/src/main/java/org/apache/blur/manager/stats/MergerTableStats.java
+++ b/src/blur-core/src/main/java/org/apache/blur/manager/stats/MergerTableStats.java
@@ -45,10 +45,10 @@ public class MergerTableStats implements Merger<TableStats> {
   }
 
   private TableStats merge(TableStats s1, TableStats s2) {
-    s1.tableName = s2.tableName;
+//    s1.tableName = s2.tableName;
     s1.bytes = Math.max(s1.bytes, s2.bytes);
-    s1.recordCount = s1.recordCount + s2.recordCount;
-    s1.rowCount = s1.rowCount + s2.rowCount;
+//    s1.recordCount = s1.recordCount + s2.recordCount;
+//    s1.rowCount = s1.rowCount + s2.rowCount;
     s1.queries = Math.max(s1.queries, s2.queries);
     return s1;
   }

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/aee531c2/src/blur-core/src/main/java/org/apache/blur/manager/status/MergerQueryStatus.java
----------------------------------------------------------------------
diff --git a/src/blur-core/src/main/java/org/apache/blur/manager/status/MergerQueryStatus.java b/src/blur-core/src/main/java/org/apache/blur/manager/status/MergerQueryStatus.java
deleted file mode 100644
index 1d7197d..0000000
--- a/src/blur-core/src/main/java/org/apache/blur/manager/status/MergerQueryStatus.java
+++ /dev/null
@@ -1,85 +0,0 @@
-package org.apache.blur.manager.status;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.blur.thrift.generated.BlurException;
-import org.apache.blur.thrift.generated.BlurQueryStatus;
-import org.apache.blur.thrift.generated.CpuTime;
-import org.apache.blur.thrift.generated.QueryState;
-import org.apache.blur.utils.BlurExecutorCompletionService;
-import org.apache.blur.utils.ForkJoin.Merger;
-
-
-public class MergerQueryStatus implements Merger<List<BlurQueryStatus>> {
-
-  private long _timeout;
-
-  public MergerQueryStatus(long timeout) {
-    _timeout = timeout;
-  }
-
-  @Override
-  public List<BlurQueryStatus> merge(BlurExecutorCompletionService<List<BlurQueryStatus>> service) throws BlurException {
-    Map<Long, BlurQueryStatus> statusMap = new HashMap<Long, BlurQueryStatus>();
-    while (service.getRemainingCount() > 0) {
-      Future<List<BlurQueryStatus>> future = service.poll(_timeout, TimeUnit.MILLISECONDS, true);
-      List<BlurQueryStatus> status = service.getResultThrowException(future);
-      addToMap(statusMap, status);
-    }
-    return new ArrayList<BlurQueryStatus>(statusMap.values());
-  }
-
-  private void addToMap(Map<Long, BlurQueryStatus> statusMap, List<BlurQueryStatus> list) {
-    for (BlurQueryStatus status : list) {
-      BlurQueryStatus searchQueryStatus = statusMap.get(status.uuid);
-      if (searchQueryStatus == null) {
-        statusMap.put(status.uuid, status);
-      } else {
-        statusMap.put(status.uuid, merge(searchQueryStatus, status));
-      }
-    }
-  }
-
-  public static BlurQueryStatus merge(BlurQueryStatus s1, BlurQueryStatus s2) {
-    s1.completeShards = s1.completeShards + s2.completeShards;
-    s1.totalShards = s1.totalShards + s2.totalShards;
-    if (s1.state != s2.state) {
-      if (s1.state == QueryState.INTERRUPTED || s2.state == QueryState.INTERRUPTED) {
-        s1.state = QueryState.INTERRUPTED;
-      } else if (s1.state == QueryState.RUNNING || s2.state == QueryState.RUNNING) {
-        s1.state = QueryState.RUNNING;
-      } else {
-        s1.state = QueryState.COMPLETE;
-      }
-    }
-    if (s1.cpuTimes == null) {
-      s1.cpuTimes = new HashMap<String, CpuTime>();
-    }
-    if (s2.cpuTimes != null) {
-      s1.cpuTimes.putAll(s2.cpuTimes);
-    }
-    return s1;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/aee531c2/src/blur-core/src/main/java/org/apache/blur/manager/status/MergerQueryStatusSingle.java
----------------------------------------------------------------------
diff --git a/src/blur-core/src/main/java/org/apache/blur/manager/status/MergerQueryStatusSingle.java b/src/blur-core/src/main/java/org/apache/blur/manager/status/MergerQueryStatusSingle.java
deleted file mode 100644
index f3d922d..0000000
--- a/src/blur-core/src/main/java/org/apache/blur/manager/status/MergerQueryStatusSingle.java
+++ /dev/null
@@ -1,50 +0,0 @@
-package org.apache.blur.manager.status;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.blur.thrift.generated.BlurException;
-import org.apache.blur.thrift.generated.BlurQueryStatus;
-import org.apache.blur.utils.BlurExecutorCompletionService;
-import org.apache.blur.utils.ForkJoin.Merger;
-
-
-public class MergerQueryStatusSingle implements Merger<BlurQueryStatus> {
-
-  private long _timeout;
-
-  public MergerQueryStatusSingle(long timeout) {
-    _timeout = timeout;
-  }
-
-  @Override
-  public BlurQueryStatus merge(BlurExecutorCompletionService<BlurQueryStatus> service) throws BlurException {
-    BlurQueryStatus result = null;
-    while (service.getRemainingCount() > 0) {
-      Future<BlurQueryStatus> future = service.poll(_timeout, TimeUnit.MILLISECONDS, true);
-      BlurQueryStatus status = service.getResultThrowException(future);
-      if (result == null) {
-        result = status;
-      } else {
-        result = MergerQueryStatus.merge(result, status);
-      }
-    }
-    return result;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/aee531c2/src/blur-core/src/main/java/org/apache/blur/manager/writer/AbstractBlurIndex.java
----------------------------------------------------------------------
diff --git a/src/blur-core/src/main/java/org/apache/blur/manager/writer/AbstractBlurIndex.java b/src/blur-core/src/main/java/org/apache/blur/manager/writer/AbstractBlurIndex.java
index b758f11..a9fe542 100644
--- a/src/blur-core/src/main/java/org/apache/blur/manager/writer/AbstractBlurIndex.java
+++ b/src/blur-core/src/main/java/org/apache/blur/manager/writer/AbstractBlurIndex.java
@@ -31,7 +31,6 @@ import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.KeepOnlyLastCommitDeletionPolicy;
 import org.apache.lucene.index.TieredMergePolicy;
-import org.apache.lucene.search.similarities.Similarity;
 import org.apache.lucene.store.Directory;
 
 public abstract class AbstractBlurIndex extends BlurIndex {
@@ -45,14 +44,12 @@ public abstract class AbstractBlurIndex extends BlurIndex {
   private AtomicBoolean _open = new AtomicBoolean();
   private BlurIndexRefresher _refresher;
   private String _shard;
-  private Similarity _similarity;
   private String _table;
 
   protected IndexWriterConfig initIndexWriterConfig() {
     IndexWriterConfig conf = new IndexWriterConfig(LUCENE_VERSION, _analyzer);
     conf.setWriteLockTimeout(TimeUnit.MINUTES.toMillis(5));
     conf.setIndexDeletionPolicy(_indexDeletionPolicy);
-    conf.setSimilarity(_similarity);
     TieredMergePolicy mergePolicy = (TieredMergePolicy) conf.getMergePolicy();
     mergePolicy.setUseCompoundFile(false);
     _open.set(true);
@@ -132,10 +129,6 @@ public abstract class AbstractBlurIndex extends BlurIndex {
     this._shard = shard;
   }
 
-  public void setSimilarity(Similarity similarity) {
-    _similarity = similarity;
-  }
-
   public void setTable(String table) {
     this._table = table;
   }


Mime
View raw message