incubator-blur-commits mailing list archives

From amccu...@apache.org
Subject [29/51] [partial] Initial repackage to org.apache.
Date Mon, 03 Sep 2012 03:17:13 GMT
http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/33df9310/src/blur-store/src/main/java/org/apache/blur/store/hdfs/HdfsFileReader.java
----------------------------------------------------------------------
diff --git a/src/blur-store/src/main/java/org/apache/blur/store/hdfs/HdfsFileReader.java b/src/blur-store/src/main/java/org/apache/blur/store/hdfs/HdfsFileReader.java
new file mode 100644
index 0000000..35ecaf8
--- /dev/null
+++ b/src/blur-store/src/main/java/org/apache/blur/store/hdfs/HdfsFileReader.java
@@ -0,0 +1,188 @@
+package org.apache.blur.store.hdfs;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.blur.log.Log;
+import org.apache.blur.log.LogFactory;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.lucene.store.DataInput;
+
+
+public class HdfsFileReader extends DataInput {
+
+  private static final Log LOG = LogFactory.getLog(HdfsFileReader.class);
+
+  private static final int VERSION = -1;
+
+  private final long _length;
+  private final long _hdfsLength;
+  private final List<HdfsMetaBlock> _metaBlocks;
+  private FSDataInputStream _inputStream;
+  private long _logicalPos;
+  private long _boundary;
+  private long _realPos;
+  private boolean isClone;
+
+  public HdfsFileReader(FileSystem fileSystem, Path path, int bufferSize) throws IOException {
+    if (!fileSystem.exists(path)) {
+      throw new FileNotFoundException(path.toString());
+    }
+    FileStatus fileStatus = fileSystem.getFileStatus(path);
+    _hdfsLength = fileStatus.getLen();
+    _inputStream = fileSystem.open(path, bufferSize);
+
+    // read meta blocks
+    _inputStream.seek(_hdfsLength - 16);
+    int numberOfBlocks = _inputStream.readInt();
+    _length = _inputStream.readLong();
+    int version = _inputStream.readInt();
+    if (version != VERSION) {
+      throw new RuntimeException("Version of file [" + version + "] does not match reader [" + VERSION + "]");
+    }
+    // skip back past the meta block records; each block is 3 longs (24 bytes)
+    _inputStream.seek(_hdfsLength - 16 - (numberOfBlocks * 24));
+    _metaBlocks = new ArrayList<HdfsMetaBlock>(numberOfBlocks);
+    for (int i = 0; i < numberOfBlocks; i++) {
+      HdfsMetaBlock hdfsMetaBlock = new HdfsMetaBlock();
+      hdfsMetaBlock.readFields(_inputStream);
+      _metaBlocks.add(hdfsMetaBlock);
+    }
+    seek(0);
+  }
+
+  public HdfsFileReader(FileSystem fileSystem, Path path) throws IOException {
+    this(fileSystem, path, HdfsDirectory.BUFFER_SIZE);
+  }
+
+  public long getPosition() {
+    return _logicalPos;
+  }
+
+  public long length() {
+    return _length;
+  }
+
+  public void seek(long pos) throws IOException {
+    if (_logicalPos == pos) {
+      return;
+    }
+    _logicalPos = pos;
+    seekInternal();
+  }
+
+  public void close() throws IOException {
+    if (!isClone) {
+      _inputStream.close();
+    }
+  }
+
+  /**
+   * This method should never be used!
+   */
+  @Override
+  public byte readByte() throws IOException {
+    LOG.warn("Should not be used!");
+    byte[] buf = new byte[1];
+    readBytes(buf, 0, 1);
+    return buf[0];
+  }
+
+  @Override
+  public void readBytes(byte[] b, int offset, int len) throws IOException {
+    checkBoundary();
+    // might need to read in multiple stages
+    while (len > 0) {
+      if (_logicalPos >= _boundary) {
+        seekInternal();
+      }
+      int lengthToRead = (int) Math.min(_boundary - _logicalPos, len);
+      _inputStream.readFully(_realPos, b, offset, lengthToRead); // readFully avoids silent short reads
+      offset += lengthToRead;
+      _logicalPos += lengthToRead;
+      _realPos += lengthToRead;
+      len -= lengthToRead;
+    }
+  }
+
+  private void checkBoundary() throws IOException {
+    if (_boundary == -1L) {
+      throw new IOException("eof");
+    }
+  }
+
+  private void seekInternal() throws IOException {
+    HdfsMetaBlock block = null;
+    for (HdfsMetaBlock b : _metaBlocks) {
+      if (b.containsDataAt(_logicalPos)) {
+        block = b;
+      }
+    }
+    if (block == null) {
+      _boundary = -1L;
+      _realPos = -1L;
+    } else {
+      _realPos = block.getRealPosition(_logicalPos);
+      _boundary = getBoundary(block);
+    }
+  }
+
+  private long getBoundary(HdfsMetaBlock block) {
+    _boundary = block.logicalPosition + block.length;
+    for (HdfsMetaBlock b : _metaBlocks) {
+      if (b.logicalPosition > block.logicalPosition && b.logicalPosition < _boundary && b.logicalPosition >= _logicalPos) {
+        _boundary = b.logicalPosition;
+      }
+    }
+    return _boundary;
+  }
+
+  public static long getLength(FileSystem fileSystem, Path path) throws IOException {
+    FSDataInputStream inputStream = null;
+    try {
+      FileStatus fileStatus = fileSystem.getFileStatus(path);
+      inputStream = fileSystem.open(path);
+      long hdfsLength = fileStatus.getLen();
+      inputStream.seek(hdfsLength - 12);
+      long length = inputStream.readLong();
+      int version = inputStream.readInt();
+      if (version != VERSION) {
+        throw new RuntimeException("Version of file [" + version + "] does not match reader [" + VERSION + "]");
+      }
+      return length;
+    } finally {
+      if (inputStream != null) {
+        inputStream.close();
+      }
+    }
+  }
+
+  @Override
+  public Object clone() {
+    HdfsFileReader reader = (HdfsFileReader) super.clone();
+    reader.isClone = true;
+    return reader;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/33df9310/src/blur-store/src/main/java/org/apache/blur/store/hdfs/HdfsFileWriter.java
----------------------------------------------------------------------
diff --git a/src/blur-store/src/main/java/org/apache/blur/store/hdfs/HdfsFileWriter.java b/src/blur-store/src/main/java/org/apache/blur/store/hdfs/HdfsFileWriter.java
new file mode 100644
index 0000000..1c23f60
--- /dev/null
+++ b/src/blur-store/src/main/java/org/apache/blur/store/hdfs/HdfsFileWriter.java
@@ -0,0 +1,99 @@
+package org.apache.blur.store.hdfs;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.lucene.store.DataOutput;
+
+public class HdfsFileWriter extends DataOutput {
+
+  public static final int VERSION = -1;
+
+  private FSDataOutputStream _outputStream;
+  private HdfsMetaBlock _block;
+  private List<HdfsMetaBlock> _blocks = new ArrayList<HdfsMetaBlock>();
+  private long _length;
+  private long _currentPosition;
+
+  public HdfsFileWriter(FileSystem fileSystem, Path path) throws IOException {
+    _outputStream = fileSystem.create(path);
+    seek(0);
+  }
+
+  public long length() {
+    return _length;
+  }
+
+  public void seek(long pos) throws IOException {
+    if (_block != null) {
+      _blocks.add(_block);
+    }
+    _block = new HdfsMetaBlock();
+    _block.realPosition = _outputStream.getPos();
+    _block.logicalPosition = pos;
+    _currentPosition = pos;
+  }
+
+  public void close() throws IOException {
+    if (_block != null) {
+      _blocks.add(_block);
+    }
+    flushMetaBlocks();
+    _outputStream.close();
+  }
+
+  private void flushMetaBlocks() throws IOException {
+    for (HdfsMetaBlock block : _blocks) {
+      block.write(_outputStream);
+    }
+    _outputStream.writeInt(_blocks.size());
+    _outputStream.writeLong(length());
+    _outputStream.writeInt(VERSION);
+  }
+
+  @Override
+  public void writeByte(byte b) throws IOException {
+    _outputStream.write(b & 0xFF);
+    _block.length++;
+    _currentPosition++;
+    updateLength();
+  }
+
+  @Override
+  public void writeBytes(byte[] b, int offset, int length) throws IOException {
+    _outputStream.write(b, offset, length);
+    _block.length += length;
+    _currentPosition += length;
+    updateLength();
+  }
+
+  private void updateLength() {
+    if (_currentPosition > _length) {
+      _length = _currentPosition;
+    }
+  }
+
+  public long getPosition() {
+    return _currentPosition;
+  }
+}
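
A minimal round-trip sketch using the reader/writer pair above (not part of this commit; the HDFS URL and file path are illustrative assumptions):

    import org.apache.blur.store.hdfs.HdfsFileReader;
    import org.apache.blur.store.hdfs.HdfsFileWriter;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class HdfsFileRoundTrip {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path path = new Path("hdfs://localhost:9000/example/file.dat"); // assumed location
        FileSystem fileSystem = path.getFileSystem(conf);

        byte[] data = "hello blur".getBytes("UTF-8");

        HdfsFileWriter writer = new HdfsFileWriter(fileSystem, path);
        writer.writeBytes(data, 0, data.length);
        writer.close(); // close() appends the meta block records and the 16-byte trailer

        HdfsFileReader reader = new HdfsFileReader(fileSystem, path);
        byte[] read = new byte[data.length];
        reader.seek(0);
        reader.readBytes(read, 0, read.length); // readBytes is the preferred path; readByte logs a warning
        reader.close();

        System.out.println(new String(read, "UTF-8"));
        System.out.println("logical length: " + HdfsFileReader.getLength(fileSystem, path));
      }
    }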

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/33df9310/src/blur-store/src/main/java/org/apache/blur/store/hdfs/HdfsMetaBlock.java
----------------------------------------------------------------------
diff --git a/src/blur-store/src/main/java/org/apache/blur/store/hdfs/HdfsMetaBlock.java b/src/blur-store/src/main/java/org/apache/blur/store/hdfs/HdfsMetaBlock.java
new file mode 100644
index 0000000..b939293
--- /dev/null
+++ b/src/blur-store/src/main/java/org/apache/blur/store/hdfs/HdfsMetaBlock.java
@@ -0,0 +1,61 @@
+package org.apache.blur.store.hdfs;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.hadoop.io.Writable;
+
+class HdfsMetaBlock implements Writable {
+  long logicalPosition;
+  long realPosition;
+  long length;
+
+  @Override
+  public void readFields(DataInput in) throws IOException {
+    logicalPosition = in.readLong();
+    realPosition = in.readLong();
+    length = in.readLong();
+  }
+
+  @Override
+  public void write(DataOutput out) throws IOException {
+    out.writeLong(logicalPosition);
+    out.writeLong(realPosition);
+    out.writeLong(length);
+  }
+
+  boolean containsDataAt(long logicalPos) {
+    if (logicalPos >= logicalPosition && logicalPos < logicalPosition + length) {
+      return true;
+    }
+    return false;
+  }
+
+  long getRealPosition(long logicalPos) {
+    long offset = logicalPos - logicalPosition;
+    long pos = realPosition + offset;
+    return pos;
+  }
+
+  @Override
+  public String toString() {
+    return "HdfsMetaBlock [length=" + length + ", logicalPosition=" + logicalPosition + ", realPosition=" + realPosition + "]";
+  }
+}
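
For reference, the on-disk layout implied by HdfsFileWriter.flushMetaBlocks and the trailer parsing in HdfsFileReader (a sketch derived from the code above, not a formal format specification):

    // file body: raw bytes written via writeByte/writeBytes, in write order
    // meta block records, one per HdfsMetaBlock, 24 bytes each:
    //   long logicalPosition
    //   long realPosition
    //   long length
    // trailer, the last 16 bytes of the file:
    //   int  numberOfBlocks
    //   long length      (logical length, HdfsFileWriter.length())
    //   int  VERSION     (-1)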

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/33df9310/src/blur-store/src/main/java/org/apache/blur/store/hdfs/NullIndexOutput.java
----------------------------------------------------------------------
diff --git a/src/blur-store/src/main/java/org/apache/blur/store/hdfs/NullIndexOutput.java b/src/blur-store/src/main/java/org/apache/blur/store/hdfs/NullIndexOutput.java
new file mode 100644
index 0000000..1a03c10
--- /dev/null
+++ b/src/blur-store/src/main/java/org/apache/blur/store/hdfs/NullIndexOutput.java
@@ -0,0 +1,70 @@
+package org.apache.blur.store.hdfs;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import java.io.IOException;
+
+import org.apache.lucene.store.IndexOutput;
+
+public class NullIndexOutput extends IndexOutput {
+
+  private long _pos;
+  private long _length;
+
+  @Override
+  public void close() throws IOException {
+
+  }
+
+  @Override
+  public void flush() throws IOException {
+
+  }
+
+  @Override
+  public long getFilePointer() {
+    return _pos;
+  }
+
+  @Override
+  public long length() throws IOException {
+    return _length;
+  }
+
+  @Override
+  public void seek(long pos) throws IOException {
+    _pos = pos;
+  }
+
+  @Override
+  public void writeByte(byte b) throws IOException {
+    _pos++;
+    updateLength(); // keep length() consistent with writeBytes()
+  }
+
+  @Override
+  public void writeBytes(byte[] b, int offset, int length) throws IOException {
+    _pos += length;
+    updateLength();
+  }
+
+  private void updateLength() {
+    if (_pos > _length) {
+      _length = _pos;
+    }
+  }
+
+}
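
A small sketch (not part of this commit): NullIndexOutput can stand in for a real output to measure how many bytes a series of writes would produce without storing anything.

    import org.apache.blur.store.hdfs.NullIndexOutput;

    public class NullIndexOutputExample {
      public static void main(String[] args) throws Exception {
        NullIndexOutput out = new NullIndexOutput();
        byte[] buf = new byte[1024];
        out.writeBytes(buf, 0, buf.length);
        out.writeBytes(buf, 0, 512);
        System.out.println("bytes that would have been written: " + out.length()); // 1536
        out.close();
      }
    }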

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/33df9310/src/blur-store/src/main/java/org/apache/blur/store/lock/BlurLockFactory.java
----------------------------------------------------------------------
diff --git a/src/blur-store/src/main/java/org/apache/blur/store/lock/BlurLockFactory.java b/src/blur-store/src/main/java/org/apache/blur/store/lock/BlurLockFactory.java
new file mode 100644
index 0000000..c79608c
--- /dev/null
+++ b/src/blur-store/src/main/java/org/apache/blur/store/lock/BlurLockFactory.java
@@ -0,0 +1,102 @@
+package org.apache.blur.store.lock;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import java.io.IOException;
+import java.util.Arrays;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.lucene.store.Lock;
+import org.apache.lucene.store.LockFactory;
+
+public class BlurLockFactory extends LockFactory {
+
+  private final Configuration _configuration;
+  private final FileSystem _fileSystem;
+  private final String _baseLockKey;
+  private byte[] _lockKey;
+  private final Path _dir;
+
+  public BlurLockFactory(Configuration configuration, Path dir, String host, int pid) throws IOException {
+    _configuration = configuration;
+    _dir = dir;
+    _fileSystem = _dir.getFileSystem(_configuration);
+    _baseLockKey = host + "/" + pid;
+  }
+
+  @Override
+  public Lock makeLock(String lockName) {
+    final Path lockPath = new Path(_dir, lockName);
+    return new Lock() {
+      private boolean _set;
+
+      @Override
+      public boolean obtain() throws IOException {
+        if (_set) {
+          throw new IOException("Lock for [" + _baseLockKey + "] can only be set once.");
+        }
+        try {
+          _lockKey = (_baseLockKey + "/" + System.currentTimeMillis()).getBytes();
+          FSDataOutputStream outputStream = _fileSystem.create(lockPath, true);
+          outputStream.write(_lockKey);
+          outputStream.close();
+        } finally {
+          _set = true;
+        }
+        return true;
+      }
+
+      @Override
+      public void release() throws IOException {
+        _fileSystem.delete(lockPath, false);
+      }
+
+      @Override
+      public boolean isLocked() throws IOException {
+        if (!_set) {
+          return false;
+        }
+        if (!_fileSystem.exists(lockPath)) {
+          return false;
+        }
+        FileStatus fileStatus = _fileSystem.getFileStatus(lockPath);
+        long len = fileStatus.getLen();
+        if (len != _lockKey.length) {
+          return false;
+        }
+        byte[] buf = new byte[_lockKey.length];
+        FSDataInputStream inputStream = _fileSystem.open(lockPath);
+        inputStream.readFully(buf);
+        inputStream.close();
+        if (Arrays.equals(_lockKey, buf)) {
+          return true;
+        }
+        return false;
+      }
+    };
+  }
+
+  @Override
+  public void clearLock(String lockName) throws IOException {
+    _fileSystem.delete(new Path(_dir, lockName), false);
+  }
+}
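
A minimal usage sketch for the lock factory above (not part of this commit; the host name, pid, and HDFS path are illustrative assumptions):

    import org.apache.blur.store.lock.BlurLockFactory;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.lucene.store.Lock;

    public class BlurLockExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path dir = new Path("hdfs://localhost:9000/example/index"); // assumed index location
        BlurLockFactory factory = new BlurLockFactory(conf, dir, "localhost", 1234);

        Lock lock = factory.makeLock("write.lock");
        if (lock.obtain()) {              // writes "<host>/<pid>/<timestamp>" into the lock file
          try {
            // ... work that requires the lock ...
            System.out.println("isLocked: " + lock.isLocked());
          } finally {
            lock.release();               // deletes the lock file
          }
        }
      }
    }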

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/33df9310/src/blur-store/src/main/java/org/apache/lucene/index/WarmUpByFieldBounds.java
----------------------------------------------------------------------
diff --git a/src/blur-store/src/main/java/org/apache/lucene/index/WarmUpByFieldBounds.java b/src/blur-store/src/main/java/org/apache/lucene/index/WarmUpByFieldBounds.java
index e8e9dca..c159fbc 100644
--- a/src/blur-store/src/main/java/org/apache/lucene/index/WarmUpByFieldBounds.java
+++ b/src/blur-store/src/main/java/org/apache/lucene/index/WarmUpByFieldBounds.java
@@ -25,6 +25,9 @@ import java.util.List;
 import java.util.UUID;
 import java.util.concurrent.atomic.AtomicBoolean;
 
+import org.apache.blur.log.Log;
+import org.apache.blur.log.LogFactory;
+import org.apache.blur.lucene.LuceneConstant;
 import org.apache.lucene.analysis.KeywordAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
@@ -36,9 +39,6 @@ import org.apache.lucene.store.LockObtainFailedException;
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.util.ReaderUtil;
 
-import com.nearinfinity.blur.log.Log;
-import com.nearinfinity.blur.log.LogFactory;
-import com.nearinfinity.blur.lucene.LuceneConstant;
 
 public class WarmUpByFieldBounds {
 

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/33df9310/src/blur-store/src/test/java/com/nearinfinity/blur/store/BenchmarkDirectory.java
----------------------------------------------------------------------
diff --git a/src/blur-store/src/test/java/com/nearinfinity/blur/store/BenchmarkDirectory.java b/src/blur-store/src/test/java/com/nearinfinity/blur/store/BenchmarkDirectory.java
deleted file mode 100644
index d8ecbe1..0000000
--- a/src/blur-store/src/test/java/com/nearinfinity/blur/store/BenchmarkDirectory.java
+++ /dev/null
@@ -1,148 +0,0 @@
-package com.nearinfinity.blur.store;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-import java.io.IOException;
-import java.lang.management.ManagementFactory;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Random;
-import java.util.UUID;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.lucene.analysis.standard.StandardAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.Field.Index;
-import org.apache.lucene.document.Field.Store;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.IndexWriterConfig;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.index.TermEnum;
-import org.apache.lucene.index.TieredMergePolicy;
-import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.TermQuery;
-import org.apache.lucene.search.TopDocs;
-import org.apache.lucene.search.WildcardQuery;
-
-import com.nearinfinity.blur.metrics.BlurMetrics;
-import com.nearinfinity.blur.store.blockcache.BlockCache;
-import com.nearinfinity.blur.store.blockcache.BlockDirectory;
-import com.nearinfinity.blur.store.blockcache.BlockDirectoryCache;
-import com.nearinfinity.blur.store.hdfs.HdfsDirectory;
-import com.nearinfinity.blur.store.lock.BlurLockFactory;
-
-import static com.nearinfinity.blur.lucene.LuceneConstant.LUCENE_VERSION;
-
-public class BenchmarkDirectory {
-
-  public static void main(String[] args) throws IOException {
-    int blockSize = BlockDirectory.BLOCK_SIZE;
-    long totalMemory = BlockCache._128M * 2;
-    int slabSize = (int) (totalMemory / 2);
-
-    BlockCache blockCache = new BlockCache(new BlurMetrics(new Configuration()), true, totalMemory, slabSize, blockSize);
-    BlurMetrics metrics = new BlurMetrics(new Configuration());
-    BlockDirectoryCache cache = new BlockDirectoryCache(blockCache, metrics);
-
-    Configuration configuration = new Configuration();
-    Path p = new Path("hdfs://localhost:9000/bench");
-    BlurLockFactory factory = new BlurLockFactory(configuration, p, "localhost", 0);
-
-    FileSystem fs = FileSystem.get(p.toUri(), configuration);
-    fs.delete(p, true);
-
-    final HdfsDirectory dir = new HdfsDirectory(p);
-    dir.setLockFactory(factory);
-
-    BlockDirectory directory = new BlockDirectory("test", dir, cache);
-
-    while (true) {
-      long s, e;
-
-      s = System.currentTimeMillis();
-      IndexWriterConfig conf = new IndexWriterConfig(LUCENE_VERSION, new StandardAnalyzer(LUCENE_VERSION));
-      TieredMergePolicy mergePolicy = (TieredMergePolicy) conf.getMergePolicy();
-      mergePolicy.setUseCompoundFile(false);
-      IndexWriter writer = new IndexWriter(directory, conf);
-      for (int i = 0; i < 1000000; i++) {
-        writer.addDocument(getDoc());
-      }
-      writer.close();
-      e = System.currentTimeMillis();
-      System.out.println("Indexing " + (e - s));
-
-      IndexReader reader = IndexReader.open(directory);
-      System.out.println("Docs " + reader.numDocs());
-      TermEnum terms = reader.terms();
-      List<Term> sample = new ArrayList<Term>();
-      int limit = 1000;
-      Random random = new Random();
-      SAMPLE: while (terms.next()) {
-        if (sample.size() < limit) {
-          if (random.nextInt() % 7 == 0) {
-            sample.add(terms.term());
-          }
-        } else {
-          break SAMPLE;
-        }
-      }
-      terms.close();
-
-      System.out.println("Sampling complete [" + sample.size() + "]");
-      IndexSearcher searcher = new IndexSearcher(reader);
-      long total = 0;
-      long time = 0;
-      int search = 10;
-      for (int i = 0; i < search; i++) {
-        s = System.currentTimeMillis();
-        TopDocs topDocs = searcher.search(new TermQuery(sample.get(random.nextInt(sample.size()))), 10);
-        total += topDocs.totalHits;
-        e = System.currentTimeMillis();
-        time += (e - s);
-      }
-      System.out.println("Searching " + time + " " + (time / (double) search) + " " + total);
-      for (int i = 0; i < 10; i++) {
-        s = System.currentTimeMillis();
-        TopDocs topDocs = searcher.search(new WildcardQuery(new Term("name", "fff*0*")), 10);
-        e = System.currentTimeMillis();
-        System.out.println(topDocs.totalHits + " " + (e - s));
-      }
-      reader.close();
-    }
-  }
-
-  private static Document getDoc() {
-    Document document = new Document();
-    document.add(new Field("name", UUID.randomUUID().toString(), Store.YES, Index.ANALYZED_NO_NORMS));
-    return document;
-  }
-
-  public static int getNumberOfSlabs(float heapPercentage, int numberOfBlocksPerSlab, int blockSize) {
-    long max = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax();
-    long targetBytes = (long) (max * heapPercentage);
-    int slabSize = numberOfBlocksPerSlab * blockSize;
-    int slabs = (int) (targetBytes / slabSize);
-    if (slabs == 0) {
-      throw new RuntimeException("Minimum heap size is 512m!");
-    }
-    return slabs;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/33df9310/src/blur-store/src/test/java/com/nearinfinity/blur/store/BenchmarkDirectoryNrt.java
----------------------------------------------------------------------
diff --git a/src/blur-store/src/test/java/com/nearinfinity/blur/store/BenchmarkDirectoryNrt.java b/src/blur-store/src/test/java/com/nearinfinity/blur/store/BenchmarkDirectoryNrt.java
deleted file mode 100644
index d713cda..0000000
--- a/src/blur-store/src/test/java/com/nearinfinity/blur/store/BenchmarkDirectoryNrt.java
+++ /dev/null
@@ -1,160 +0,0 @@
-package com.nearinfinity.blur.store;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-import java.io.IOException;
-import java.lang.management.ManagementFactory;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.UUID;
-import java.util.concurrent.ArrayBlockingQueue;
-import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.lucene.analysis.standard.StandardAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.Field.Index;
-import org.apache.lucene.document.Field.Store;
-import org.apache.lucene.index.CorruptIndexException;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.IndexWriterConfig;
-import org.apache.lucene.index.TieredMergePolicy;
-import org.apache.lucene.store.NoLockFactory;
-
-import com.nearinfinity.blur.metrics.BlurMetrics;
-import com.nearinfinity.blur.store.blockcache.BlockCache;
-import com.nearinfinity.blur.store.blockcache.BlockDirectory;
-import com.nearinfinity.blur.store.blockcache.BlockDirectoryCache;
-import com.nearinfinity.blur.store.hdfs.HdfsDirectory;
-
-import static com.nearinfinity.blur.lucene.LuceneConstant.LUCENE_VERSION;
-
-public class BenchmarkDirectoryNrt {
-
-  public static void main(String[] args) throws IOException, InterruptedException {
-    int blockSize = BlockDirectory.BLOCK_SIZE;
-    long totalMemory = BlockCache._128M * 2;
-    int slabSize = (int) (totalMemory / 2);
-
-    BlockCache blockCache = new BlockCache(new BlurMetrics(new Configuration()), true, totalMemory, slabSize, blockSize);
-    BlurMetrics metrics = new BlurMetrics(new Configuration());
-    BlockDirectoryCache cache = new BlockDirectoryCache(blockCache, metrics);
-
-    Path p = new Path("hdfs://localhost:9000/bench");
-    FileSystem fs = FileSystem.get(p.toUri(), new Configuration());
-    fs.delete(p, true);
-
-    final HdfsDirectory dir = new HdfsDirectory(p);
-    dir.setLockFactory(NoLockFactory.getNoLockFactory());
-
-    BlockDirectory directory = new BlockDirectory("test", dir, cache);
-
-    while (true) {
-      IndexWriterConfig conf = new IndexWriterConfig(LUCENE_VERSION, new StandardAnalyzer(LUCENE_VERSION));
-      TieredMergePolicy mergePolicy = (TieredMergePolicy) conf.getMergePolicy();
-      mergePolicy.setUseCompoundFile(false);
-      int count = 0;
-      int max = 10000;
-      long s = System.currentTimeMillis();
-      IndexWriter writer = new IndexWriter(directory, conf);
-      long as = System.currentTimeMillis();
-      BlockingQueue<Collection<Document>> queue = new ArrayBlockingQueue<Collection<Document>>(1024);
-      Indexer indexer = new Indexer(queue, writer);
-      new Thread(indexer).start();
-      for (int i = 0; i < 1000000; i++) {
-        if (count >= max) {
-          double aseconds = (System.currentTimeMillis() - as) / 1000.0;
-          double arate = i / aseconds;
-          double seconds = (System.currentTimeMillis() - s) / 1000.0;
-          double rate = count / seconds;
-          System.out.println("Total [" + i + "] Rate [" + rate + "] AvgRate [" + arate + "] Doc count [" + indexer.getReader().numDocs() + "]");
-          count = 0;
-          s = System.currentTimeMillis();
-        }
-        queue.put(Arrays.asList(getDoc()));
-        count++;
-      }
-      writer.close();
-    }
-  }
-
-  private static class Indexer implements Runnable {
-
-    private BlockingQueue<Collection<Document>> _queue;
-    private AtomicBoolean _running = new AtomicBoolean(true);
-    private IndexWriter _writer;
-    private IndexReader _reader;
-
-    public Indexer(BlockingQueue<Collection<Document>> queue, IndexWriter writer) throws CorruptIndexException, IOException {
-      _queue = queue;
-      _writer = writer;
-      _reader = IndexReader.open(_writer, true);
-    }
-
-    public IndexReader getReader() {
-      return _reader;
-    }
-
-    @Override
-    public void run() {
-      long cycleTime = 50000000;
-      long start = System.nanoTime();
-      while (_running.get()) {
-        try {
-          Collection<Document> docs = _queue.take();
-          _writer.addDocuments(docs);
-          if (start + cycleTime < System.nanoTime()) {
-            IndexReader newReader = IndexReader.open(_writer, true);
-            _reader.close();
-            _reader = newReader;
-            start = System.nanoTime();
-          }
-        } catch (InterruptedException e) {
-          return;
-        } catch (CorruptIndexException e) {
-          e.printStackTrace();
-          return;
-        } catch (IOException e) {
-          e.printStackTrace();
-          return;
-        }
-      }
-    }
-  }
-
-  private static Document getDoc() {
-    Document document = new Document();
-    document.add(new Field("name", UUID.randomUUID().toString(), Store.YES, Index.ANALYZED_NO_NORMS));
-    return document;
-  }
-
-  public static int getNumberOfSlabs(float heapPercentage, int numberOfBlocksPerSlab, int blockSize) {
-    long max = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax();
-    long targetBytes = (long) (max * heapPercentage);
-    int slabSize = numberOfBlocksPerSlab * blockSize;
-    int slabs = (int) (targetBytes / slabSize);
-    if (slabs == 0) {
-      throw new RuntimeException("Minimum heap size is 512m!");
-    }
-    return slabs;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/33df9310/src/blur-store/src/test/java/com/nearinfinity/blur/store/HdfsDirectoryTest.java
----------------------------------------------------------------------
diff --git a/src/blur-store/src/test/java/com/nearinfinity/blur/store/HdfsDirectoryTest.java b/src/blur-store/src/test/java/com/nearinfinity/blur/store/HdfsDirectoryTest.java
deleted file mode 100644
index 1a03156..0000000
--- a/src/blur-store/src/test/java/com/nearinfinity/blur/store/HdfsDirectoryTest.java
+++ /dev/null
@@ -1,203 +0,0 @@
-package com.nearinfinity.blur.store;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import java.io.File;
-import java.io.IOException;
-import java.net.URI;
-import java.util.HashSet;
-import java.util.Random;
-import java.util.Set;
-
-import org.apache.hadoop.fs.Path;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.IndexInput;
-import org.apache.lucene.store.IndexOutput;
-import org.apache.lucene.store.RAMDirectory;
-import org.junit.Before;
-import org.junit.Test;
-
-import com.nearinfinity.blur.store.hdfs.HdfsDirectory;
-
-public class HdfsDirectoryTest {
-
-  private static final int MAX_NUMBER_OF_WRITES = 10000;
-  private static final int MIN_FILE_SIZE = 100;
-  private static final int MAX_FILE_SIZE = 100000;
-  private static final int MIN_BUFFER_SIZE = 1;
-  private static final int MAX_BUFFER_SIZE = 5000;
-  private static final int MAX_NUMBER_OF_READS = 10000;
-  private HdfsDirectory directory;
-  private File file;
-  private long seed;
-  private Random random;
-
-  @Before
-  public void setUp() throws IOException {
-    file = new File("./tmp");
-    rm(file);
-    URI uri = new File(file, "hdfs").toURI();
-    Path hdfsDirPath = new Path(uri.toString());
-    directory = new HdfsDirectory(hdfsDirPath);
-    seed = new Random().nextLong();
-    // seed = 7392202912208392081L;
-    random = new Random(seed);
-  }
-
-  @Test
-  public void testWritingAndReadingAFile() throws IOException {
-
-    IndexOutput output = directory.createOutput("testing.test");
-    output.writeInt(12345);
-    output.flush();
-    output.close();
-
-    IndexInput input = directory.openInput("testing.test");
-    assertEquals(12345, input.readInt());
-    input.close();
-
-    String[] listAll = directory.listAll();
-    assertEquals(1, listAll.length);
-    assertEquals("testing.test", listAll[0]);
-
-    assertEquals(4, directory.fileLength("testing.test"));
-
-    IndexInput input1 = directory.openInput("testing.test");
-
-    IndexInput input2 = (IndexInput) input1.clone();
-    assertEquals(12345, input2.readInt());
-    input2.close();
-
-    assertEquals(12345, input1.readInt());
-    input1.close();
-
-    assertFalse(directory.fileExists("testing.test.other"));
-    assertTrue(directory.fileExists("testing.test"));
-    directory.deleteFile("testing.test");
-    assertFalse(directory.fileExists("testing.test"));
-  }
-
-  @Test
-  public void testEOF() throws IOException {
-    Directory fsDir = new RAMDirectory();
-    String name = "test.eof";
-    createFile(name, fsDir, directory);
-    long fsLength = fsDir.fileLength(name);
-    long hdfsLength = directory.fileLength(name);
-    assertEquals(fsLength, hdfsLength);
-    testEof(name, fsDir, fsLength);
-    testEof(name, directory, hdfsLength);
-  }
-
-  private void testEof(String name, Directory directory, long length) throws IOException {
-    IndexInput input = directory.openInput(name);
-    input.seek(length);
-    try {
-      input.readByte();
-      fail("should throw eof");
-    } catch (IOException e) {
-    }
-  }
-
-  @Test
-  public void testRandomAccessWrites() throws IOException {
-    int i = 0;
-    try {
-      Set<String> names = new HashSet<String>();
-      for (; i < 10; i++) {
-        Directory fsDir = new RAMDirectory();
-        String name = getName();
-        System.out.println("Working on pass [" + i + "] seed [" + seed + "] contains [" + names.contains(name) + "]");
-        names.add(name);
-        createFile(name, fsDir, directory);
-        assertInputsEquals(name, fsDir, directory);
-        fsDir.close();
-      }
-    } catch (Exception e) {
-      e.printStackTrace();
-      fail("Test failed with seed [" + seed + "] on pass [" + i + "]");
-    }
-  }
-
-  private void assertInputsEquals(String name, Directory fsDir, HdfsDirectory hdfs) throws IOException {
-    int reads = random.nextInt(MAX_NUMBER_OF_READS);
-    int buffer = random.nextInt(MAX_BUFFER_SIZE - MIN_BUFFER_SIZE) + MIN_BUFFER_SIZE;
-    IndexInput fsInput = fsDir.openInput(name, buffer);
-    IndexInput hdfsInput = hdfs.openInput(name, buffer);
-    assertEquals(fsInput.length(), hdfsInput.length());
-    int fileLength = (int) fsInput.length();
-    for (int i = 0; i < reads; i++) {
-      byte[] fsBuf = new byte[random.nextInt(Math.min(MAX_BUFFER_SIZE - MIN_BUFFER_SIZE, fileLength)) + MIN_BUFFER_SIZE];
-      byte[] hdfsBuf = new byte[fsBuf.length];
-      int offset = random.nextInt(fsBuf.length);
-      int length = random.nextInt(fsBuf.length - offset);
-      int pos = random.nextInt(fileLength - length);
-      fsInput.seek(pos);
-      fsInput.readBytes(fsBuf, offset, length);
-      hdfsInput.seek(pos);
-      hdfsInput.readBytes(hdfsBuf, offset, length);
-      for (int f = offset; f < length; f++) {
-        if (fsBuf[f] != hdfsBuf[f]) {
-          fail();
-        }
-      }
-    }
-    fsInput.close();
-    hdfsInput.close();
-  }
-
-  private void createFile(String name, Directory fsDir, HdfsDirectory hdfs) throws IOException {
-    int writes = random.nextInt(MAX_NUMBER_OF_WRITES);
-    int fileLength = random.nextInt(MAX_FILE_SIZE - MIN_FILE_SIZE) + MIN_FILE_SIZE;
-    IndexOutput fsOutput = fsDir.createOutput(name);
-    fsOutput.setLength(fileLength);
-    IndexOutput hdfsOutput = hdfs.createOutput(name);
-    hdfsOutput.setLength(fileLength);
-    for (int i = 0; i < writes; i++) {
-      byte[] buf = new byte[random.nextInt(Math.min(MAX_BUFFER_SIZE - MIN_BUFFER_SIZE, fileLength)) + MIN_BUFFER_SIZE];
-      random.nextBytes(buf);
-      int offset = random.nextInt(buf.length);
-      int length = random.nextInt(buf.length - offset);
-      fsOutput.writeBytes(buf, offset, length);
-      hdfsOutput.writeBytes(buf, offset, length);
-    }
-    fsOutput.close();
-    hdfsOutput.close();
-  }
-
-  private String getName() {
-    return Long.toString(Math.abs(random.nextLong()));
-  }
-
-  public static void rm(File file) {
-    if (!file.exists()) {
-      return;
-    }
-    if (file.isDirectory()) {
-      for (File f : file.listFiles()) {
-        rm(f);
-      }
-    }
-    file.delete();
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/33df9310/src/blur-store/src/test/java/com/nearinfinity/blur/store/UsingHdfsDir.java
----------------------------------------------------------------------
diff --git a/src/blur-store/src/test/java/com/nearinfinity/blur/store/UsingHdfsDir.java b/src/blur-store/src/test/java/com/nearinfinity/blur/store/UsingHdfsDir.java
deleted file mode 100644
index 7cbfc57..0000000
--- a/src/blur-store/src/test/java/com/nearinfinity/blur/store/UsingHdfsDir.java
+++ /dev/null
@@ -1,112 +0,0 @@
-package com.nearinfinity.blur.store;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-import static com.nearinfinity.blur.lucene.LuceneConstant.LUCENE_VERSION;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.List;
-import java.util.UUID;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.lucene.analysis.standard.StandardAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.Field.Index;
-import org.apache.lucene.document.Field.Store;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.IndexWriterConfig;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.index.TermEnum;
-import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.TermQuery;
-import org.apache.lucene.search.TopDocs;
-import org.apache.lucene.store.NoLockFactory;
-
-import com.nearinfinity.blur.store.hdfs.HdfsDirectory;
-
-public class UsingHdfsDir {
-
-  public static void main(String[] args) throws IOException {
-
-    // FileSystem fs = FileSystem.getLocal(new Configuration());
-    // Path p = new Path("file:///tmp/testdir");
-
-    Path p = new Path("hdfs://localhost:9000/test-dir");
-    FileSystem fs = FileSystem.get(p.toUri(), new Configuration());
-    fs.delete(p, true);
-
-    final HdfsDirectory directory = new HdfsDirectory(p);
-    directory.setLockFactory(NoLockFactory.getNoLockFactory());
-
-    IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(LUCENE_VERSION, new StandardAnalyzer(LUCENE_VERSION)));
-    for (int i = 0; i < 100000; i++) {
-      writer.addDocument(getDoc());
-    }
-    writer.close();
-
-    IndexReader reader = IndexReader.open(directory);
-    TermEnum terms = reader.terms();
-    while (terms.next()) {
-      System.out.println(terms.term());
-    }
-    terms.close();
-
-    IndexSearcher searcher = new IndexSearcher(reader);
-    TopDocs topDocs = searcher.search(new TermQuery(new Term("name", "ffff")), 10);
-    System.out.println(topDocs.totalHits);
-
-    reader.close();
-
-    List<String> files = new ArrayList<String>(Arrays.asList(directory.listAll()));
-    Collections.sort(files, new Comparator<String>() {
-      @Override
-      public int compare(String o1, String o2) {
-        try {
-          long fileLength1 = directory.fileLength(o1);
-          long fileLength2 = directory.fileLength(o2);
-          if (fileLength1 == fileLength2) {
-            return o1.compareTo(o2);
-          }
-          return (int) (fileLength2 - fileLength1);
-        } catch (IOException e) {
-          throw new RuntimeException(e);
-        }
-      }
-    });
-
-    for (String file : files) {
-      System.out.println(file + " " + directory.fileLength(file));
-    }
-
-    directory.close();
-  }
-
-  private static Document getDoc() {
-    Document document = new Document();
-    document.add(new Field("name", UUID.randomUUID().toString(), Store.YES, Index.ANALYZED_NO_NORMS));
-    return document;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/33df9310/src/blur-store/src/test/java/com/nearinfinity/blur/store/blockcache/BlockCacheTest.java
----------------------------------------------------------------------
diff --git a/src/blur-store/src/test/java/com/nearinfinity/blur/store/blockcache/BlockCacheTest.java b/src/blur-store/src/test/java/com/nearinfinity/blur/store/blockcache/BlockCacheTest.java
deleted file mode 100644
index 13f8260..0000000
--- a/src/blur-store/src/test/java/com/nearinfinity/blur/store/blockcache/BlockCacheTest.java
+++ /dev/null
@@ -1,85 +0,0 @@
-package com.nearinfinity.blur.store.blockcache;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-import static org.junit.Assert.assertTrue;
-
-import java.util.Arrays;
-import java.util.Random;
-import java.util.concurrent.atomic.AtomicLong;
-
-import org.apache.hadoop.conf.Configuration;
-import org.junit.Test;
-
-import com.nearinfinity.blur.metrics.BlurMetrics;
-
-public class BlockCacheTest {
-  @Test
-  public void testBlockCache() {
-    int blocksInTest = 2000000;
-    int blockSize = 1024;
-
-    int slabSize = blockSize * 4096;
-    long totalMemory = 2 * slabSize;
-
-    BlockCache blockCache = new BlockCache(new BlurMetrics(new Configuration()), true, totalMemory, slabSize, blockSize);
-    byte[] buffer = new byte[1024];
-    Random random = new Random();
-    byte[] newData = new byte[blockSize];
-    AtomicLong hitsInCache = new AtomicLong();
-    AtomicLong missesInCache = new AtomicLong();
-    long storeTime = 0;
-    long fetchTime = 0;
-    int passes = 10000;
-
-    BlockCacheKey blockCacheKey = new BlockCacheKey();
-
-    for (int j = 0; j < passes; j++) {
-      long block = random.nextInt(blocksInTest);
-      int file = 0;
-      blockCacheKey.setBlock(block);
-      blockCacheKey.setFile(file);
-
-      if (blockCache.fetch(blockCacheKey, buffer)) {
-        hitsInCache.incrementAndGet();
-      } else {
-        missesInCache.incrementAndGet();
-      }
-
-      byte[] testData = testData(random, blockSize, newData);
-      long t1 = System.nanoTime();
-      blockCache.store(blockCacheKey, testData);
-      storeTime += (System.nanoTime() - t1);
-
-      long t3 = System.nanoTime();
-      if (blockCache.fetch(blockCacheKey, buffer)) {
-        fetchTime += (System.nanoTime() - t3);
-        assertTrue(Arrays.equals(testData, buffer));
-      }
-    }
-    System.out.println("Cache Hits    = " + hitsInCache.get());
-    System.out.println("Cache Misses  = " + missesInCache.get());
-    System.out.println("Store         = " + (storeTime / (double) passes) / 1000000.0);
-    System.out.println("Fetch         = " + (fetchTime / (double) passes) / 1000000.0);
-    System.out.println("# of Elements = " + blockCache.getSize());
-  }
-
-  private static byte[] testData(Random random, int size, byte[] buf) {
-    random.nextBytes(buf);
-    return buf;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/33df9310/src/blur-store/src/test/java/com/nearinfinity/blur/store/blockcache/BlockDirectoryTest.java
----------------------------------------------------------------------
diff --git a/src/blur-store/src/test/java/com/nearinfinity/blur/store/blockcache/BlockDirectoryTest.java b/src/blur-store/src/test/java/com/nearinfinity/blur/store/blockcache/BlockDirectoryTest.java
deleted file mode 100644
index 79980c0..0000000
--- a/src/blur-store/src/test/java/com/nearinfinity/blur/store/blockcache/BlockDirectoryTest.java
+++ /dev/null
@@ -1,198 +0,0 @@
-package com.nearinfinity.blur.store.blockcache;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.Map;
-import java.util.Random;
-
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.FSDirectory;
-import org.apache.lucene.store.IndexInput;
-import org.apache.lucene.store.IndexOutput;
-import org.junit.Before;
-import org.junit.Test;
-
-import com.googlecode.concurrentlinkedhashmap.ConcurrentLinkedHashMap;
-
-public class BlockDirectoryTest {
-
-  private static final int MAX_NUMBER_OF_WRITES = 10000;
-  private static final int MIN_FILE_SIZE = 100;
-  private static final int MAX_FILE_SIZE = 100000;
-  private static final int MIN_BUFFER_SIZE = 1;
-  private static final int MAX_BUFFER_SIZE = 5000;
-  private static final int MAX_NUMBER_OF_READS = 20000;
-  private Directory directory;
-  private File file;
-  private long seed;
-  private Random random;
-
-  @Before
-  public void setUp() throws IOException {
-    file = new File("./tmp");
-    rm(file);
-    file.mkdirs();
-    FSDirectory dir = FSDirectory.open(new File(file, "base"));
-    directory = new BlockDirectory("test", dir, getBasicCache());
-    seed = new Random().nextLong();
-    random = new Random(seed);
-  }
-
-  private Cache getBasicCache() {
-    return new Cache() {
-      private Map<String, byte[]> map = new ConcurrentLinkedHashMap.Builder<String, byte[]>().maximumWeightedCapacity(8).build();
-
-      @Override
-      public void update(String name, long blockId, byte[] buffer) {
-        map.put(name + blockId, copy(buffer));
-      }
-
-      private byte[] copy(byte[] buffer) {
-        byte[] b = new byte[buffer.length];
-        System.arraycopy(buffer, 0, b, 0, buffer.length);
-        return b;
-      }
-
-      @Override
-      public boolean fetch(String name, long blockId, int blockOffset, byte[] b, int off, int lengthToReadInBlock) {
-        // return false;
-        byte[] data = map.get(name + blockId);
-        if (data == null) {
-          return false;
-        }
-        System.arraycopy(data, blockOffset, b, off, lengthToReadInBlock);
-        return true;
-      }
-
-      @Override
-      public void delete(String name) {
-
-      }
-
-      @Override
-      public long size() {
-        return map.size();
-      }
-    };
-  }
-
-  @Test
-  public void testEOF() throws IOException {
-    Directory fsDir = FSDirectory.open(new File(file, "normal"));
-    String name = "test.eof";
-    createFile(name, fsDir, directory);
-    long fsLength = fsDir.fileLength(name);
-    long hdfsLength = directory.fileLength(name);
-    assertEquals(fsLength, hdfsLength);
-    testEof(name, fsDir, fsLength);
-    testEof(name, directory, hdfsLength);
-  }
-
-  private void testEof(String name, Directory directory, long length) throws IOException {
-    IndexInput input = directory.openInput(name);
-    input.seek(length);
-    try {
-      input.readByte();
-      fail("should throw eof");
-    } catch (IOException e) {
-    }
-  }
-
-  @Test
-  public void testRandomAccessWrites() throws IOException {
-    int i = 0;
-    try {
-      for (; i < 10; i++) {
-        Directory fsDir = FSDirectory.open(new File(file, "normal"));
-        String name = getName();
-        createFile(name, fsDir, directory);
-        assertInputsEquals(name, fsDir, directory);
-      }
-    } catch (Exception e) {
-      e.printStackTrace();
-      fail("Test failed with seed [" + seed + "] on pass [" + i + "]");
-    }
-  }
-
-  private void assertInputsEquals(String name, Directory fsDir, Directory hdfs) throws IOException {
-    int reads = random.nextInt(MAX_NUMBER_OF_READS);
-    int buffer = random.nextInt(MAX_BUFFER_SIZE - MIN_BUFFER_SIZE) + MIN_BUFFER_SIZE;
-    IndexInput fsInput = fsDir.openInput(name, buffer);
-    IndexInput hdfsInput = hdfs.openInput(name, buffer);
-    assertEquals(fsInput.length(), hdfsInput.length());
-    int fileLength = (int) fsInput.length();
-    for (int i = 0; i < reads; i++) {
-      byte[] fsBuf = new byte[random.nextInt(Math.min(MAX_BUFFER_SIZE - MIN_BUFFER_SIZE, fileLength)) + MIN_BUFFER_SIZE];
-      byte[] hdfsBuf = new byte[fsBuf.length];
-      int offset = random.nextInt(fsBuf.length);
-      int length = random.nextInt(fsBuf.length - offset);
-      int pos = random.nextInt(fileLength - length);
-      fsInput.seek(pos);
-      fsInput.readBytes(fsBuf, offset, length);
-      hdfsInput.seek(pos);
-      hdfsInput.readBytes(hdfsBuf, offset, length);
-      for (int f = offset; f < length; f++) {
-        if (fsBuf[f] != hdfsBuf[f]) {
-          fail(Long.toString(seed) + " read [" + i + "]");
-        }
-      }
-    }
-    fsInput.close();
-    hdfsInput.close();
-  }
-
-  private void createFile(String name, Directory fsDir, Directory hdfs) throws IOException {
-    int writes = random.nextInt(MAX_NUMBER_OF_WRITES);
-    int fileLength = random.nextInt(MAX_FILE_SIZE - MIN_FILE_SIZE) + MIN_FILE_SIZE;
-    IndexOutput fsOutput = fsDir.createOutput(name);
-    fsOutput.setLength(fileLength);
-    IndexOutput hdfsOutput = hdfs.createOutput(name);
-    hdfsOutput.setLength(fileLength);
-    for (int i = 0; i < writes; i++) {
-      byte[] buf = new byte[random.nextInt(Math.min(MAX_BUFFER_SIZE - MIN_BUFFER_SIZE, fileLength)) + MIN_BUFFER_SIZE];
-      random.nextBytes(buf);
-      int offset = random.nextInt(buf.length);
-      int length = random.nextInt(buf.length - offset);
-      fsOutput.writeBytes(buf, offset, length);
-      hdfsOutput.writeBytes(buf, offset, length);
-    }
-    fsOutput.close();
-    hdfsOutput.close();
-  }
-
-  private String getName() {
-    return Long.toString(Math.abs(random.nextLong()));
-  }
-
-  public static void rm(File file) {
-    if (!file.exists()) {
-      return;
-    }
-    if (file.isDirectory()) {
-      for (File f : file.listFiles()) {
-        rm(f);
-      }
-    }
-    file.delete();
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/33df9310/src/blur-store/src/test/java/com/nearinfinity/blur/store/compressed/CompressedFieldDataDirectoryTest.java
----------------------------------------------------------------------
diff --git a/src/blur-store/src/test/java/com/nearinfinity/blur/store/compressed/CompressedFieldDataDirectoryTest.java b/src/blur-store/src/test/java/com/nearinfinity/blur/store/compressed/CompressedFieldDataDirectoryTest.java
deleted file mode 100644
index bfdc4e4..0000000
--- a/src/blur-store/src/test/java/com/nearinfinity/blur/store/compressed/CompressedFieldDataDirectoryTest.java
+++ /dev/null
@@ -1,141 +0,0 @@
-package com.nearinfinity.blur.store.compressed;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-import static com.nearinfinity.blur.lucene.LuceneConstant.LUCENE_VERSION;
-import static org.junit.Assert.assertEquals;
-
-import java.io.IOException;
-
-import org.apache.hadoop.io.compress.CompressionCodec;
-import org.apache.lucene.analysis.KeywordAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.Field.Index;
-import org.apache.lucene.document.Field.Store;
-import org.apache.lucene.index.CorruptIndexException;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.IndexWriterConfig;
-import org.apache.lucene.index.TieredMergePolicy;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.IndexInput;
-import org.apache.lucene.store.LockObtainFailedException;
-import org.apache.lucene.store.RAMDirectory;
-import org.junit.Test;
-
-public class CompressedFieldDataDirectoryTest {
-
-  private static final CompressionCodec COMPRESSION_CODEC = CompressedFieldDataDirectory.DEFAULT_COMPRESSION;
-
-  @Test
-  public void testCompressedFieldDataDirectoryBasic() throws CorruptIndexException, IOException {
-    RAMDirectory dir = new RAMDirectory();
-    CompressedFieldDataDirectory directory = new CompressedFieldDataDirectory(dir, COMPRESSION_CODEC);
-    IndexWriterConfig config = new IndexWriterConfig(LUCENE_VERSION, new KeywordAnalyzer());
-    TieredMergePolicy mergePolicy = (TieredMergePolicy) config.getMergePolicy();
-    mergePolicy.setUseCompoundFile(false);
-    IndexWriter writer = new IndexWriter(directory, config);
-    addDocs(writer, 0, 10);
-    writer.close();
-    testFetches(directory);
-  }
-
-  @Test
-  public void testCompressedFieldDataDirectoryTransition() throws CorruptIndexException, LockObtainFailedException, IOException {
-    RAMDirectory dir = new RAMDirectory();
-
-    IndexWriterConfig config = new IndexWriterConfig(LUCENE_VERSION, new KeywordAnalyzer());
-    TieredMergePolicy mergePolicy = (TieredMergePolicy) config.getMergePolicy();
-    mergePolicy.setUseCompoundFile(false);
-    IndexWriter writer = new IndexWriter(dir, config);
-
-    addDocs(writer, 0, 5);
-    writer.close();
-
-    CompressedFieldDataDirectory directory = new CompressedFieldDataDirectory(dir, COMPRESSION_CODEC);
-    config = new IndexWriterConfig(LUCENE_VERSION, new KeywordAnalyzer());
-    mergePolicy = (TieredMergePolicy) config.getMergePolicy();
-    mergePolicy.setUseCompoundFile(false);
-    writer = new IndexWriter(directory, config);
-    addDocs(writer, 5, 5);
-    writer.close();
-    testFetches(directory);
-  }
-
-  @Test
-  public void testCompressedFieldDataDirectoryMixedBlockSize() throws CorruptIndexException, LockObtainFailedException, IOException {
-    RAMDirectory dir = new RAMDirectory();
-    IndexWriterConfig config = new IndexWriterConfig(LUCENE_VERSION, new KeywordAnalyzer());
-    TieredMergePolicy mergePolicy = (TieredMergePolicy) config.getMergePolicy();
-    mergePolicy.setUseCompoundFile(false);
-    IndexWriter writer = new IndexWriter(dir, config);
-    addDocs(writer, 0, 5);
-    writer.close();
-
-    CompressedFieldDataDirectory directory1 = new CompressedFieldDataDirectory(dir, COMPRESSION_CODEC, 2);
-    config = new IndexWriterConfig(LUCENE_VERSION, new KeywordAnalyzer());
-    mergePolicy = (TieredMergePolicy) config.getMergePolicy();
-    mergePolicy.setUseCompoundFile(false);
-    writer = new IndexWriter(directory1, config);
-    addDocs(writer, 5, 2);
-    writer.close();
-
-    CompressedFieldDataDirectory directory2 = new CompressedFieldDataDirectory(dir, COMPRESSION_CODEC, 4);
-    config = new IndexWriterConfig(LUCENE_VERSION, new KeywordAnalyzer());
-    mergePolicy = (TieredMergePolicy) config.getMergePolicy();
-    mergePolicy.setUseCompoundFile(false);
-    writer = new IndexWriter(directory2, config);
-    addDocs(writer, 7, 3);
-    writer.close();
-    testFetches(directory2);
-    testFileLengths(directory2);
-  }
-
-  private void testFileLengths(Directory dir) throws IOException {
-    String[] listAll = dir.listAll();
-    for (String name : listAll) {
-      IndexInput input = dir.openInput(name);
-      assertEquals(input.length(), dir.fileLength(name));
-      input.close();
-    }
-
-  }
-
-  private void testFetches(Directory directory) throws CorruptIndexException, IOException {
-    IndexReader reader = IndexReader.open(directory);
-    for (int i = 0; i < reader.maxDoc(); i++) {
-      String id = Integer.toString(i);
-      Document document = reader.document(i);
-      assertEquals(id, document.get("id"));
-    }
-  }
-
-  private void addDocs(IndexWriter writer, int starting, int amount) throws CorruptIndexException, IOException {
-    for (int i = 0; i < amount; i++) {
-      int index = starting + i;
-      writer.addDocument(getDoc(index));
-    }
-  }
-
-  private Document getDoc(int index) {
-    Document document = new Document();
-    document.add(new Field("id", Integer.toString(index), Store.YES, Index.NOT_ANALYZED_NO_NORMS));
-    return document;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/33df9310/src/blur-store/src/test/java/org/apache/blur/store/BenchmarkDirectory.java
----------------------------------------------------------------------
diff --git a/src/blur-store/src/test/java/org/apache/blur/store/BenchmarkDirectory.java b/src/blur-store/src/test/java/org/apache/blur/store/BenchmarkDirectory.java
new file mode 100644
index 0000000..9bc9d3c
--- /dev/null
+++ b/src/blur-store/src/test/java/org/apache/blur/store/BenchmarkDirectory.java
@@ -0,0 +1,148 @@
+package org.apache.blur.store;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import java.io.IOException;
+import java.lang.management.ManagementFactory;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Random;
+import java.util.UUID;
+
+import org.apache.blur.metrics.BlurMetrics;
+import org.apache.blur.store.blockcache.BlockCache;
+import org.apache.blur.store.blockcache.BlockDirectory;
+import org.apache.blur.store.blockcache.BlockDirectoryCache;
+import org.apache.blur.store.hdfs.HdfsDirectory;
+import org.apache.blur.store.lock.BlurLockFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.Field.Index;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermEnum;
+import org.apache.lucene.index.TieredMergePolicy;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.search.WildcardQuery;
+
+
+import static org.apache.blur.lucene.LuceneConstant.LUCENE_VERSION;
+
+public class BenchmarkDirectory {
+
+  public static void main(String[] args) throws IOException {
+    int blockSize = BlockDirectory.BLOCK_SIZE;
+    long totalMemory = BlockCache._128M * 2;
+    int slabSize = (int) (totalMemory / 2);
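+    // Split the total cache memory into two equal slabs for the block cache.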
+
+    BlockCache blockCache = new BlockCache(new BlurMetrics(new Configuration()), true, totalMemory, slabSize, blockSize);
+    BlurMetrics metrics = new BlurMetrics(new Configuration());
+    BlockDirectoryCache cache = new BlockDirectoryCache(blockCache, metrics);
+
+    Configuration configuration = new Configuration();
+    Path p = new Path("hdfs://localhost:9000/bench");
+    BlurLockFactory factory = new BlurLockFactory(configuration, p, "localhost", 0);
+
+    FileSystem fs = FileSystem.get(p.toUri(), configuration);
+    fs.delete(p, true);
+
+    final HdfsDirectory dir = new HdfsDirectory(p);
+    dir.setLockFactory(factory);
+
+    BlockDirectory directory = new BlockDirectory("test", dir, cache);
+
+    while (true) {
+      long s, e;
+
+      s = System.currentTimeMillis();
+      IndexWriterConfig conf = new IndexWriterConfig(LUCENE_VERSION, new StandardAnalyzer(LUCENE_VERSION));
+      TieredMergePolicy mergePolicy = (TieredMergePolicy) conf.getMergePolicy();
+      mergePolicy.setUseCompoundFile(false);
+      IndexWriter writer = new IndexWriter(directory, conf);
+      for (int i = 0; i < 1000000; i++) {
+        writer.addDocument(getDoc());
+      }
+      writer.close();
+      e = System.currentTimeMillis();
+      System.out.println("Indexing " + (e - s));
+
+      IndexReader reader = IndexReader.open(directory);
+      System.out.println("Docs " + reader.numDocs());
+      TermEnum terms = reader.terms();
+      List<Term> sample = new ArrayList<Term>();
+      int limit = 1000;
+      Random random = new Random();
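+      // Randomly keep roughly one in seven terms, up to 'limit', to use as query terms below.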
+      SAMPLE: while (terms.next()) {
+        if (sample.size() < limit) {
+          if (random.nextInt() % 7 == 0) {
+            sample.add(terms.term());
+          }
+        } else {
+          break SAMPLE;
+        }
+      }
+      terms.close();
+
+      System.out.println("Sampling complete [" + sample.size() + "]");
+      IndexSearcher searcher = new IndexSearcher(reader);
+      long total = 0;
+      long time = 0;
+      int search = 10;
+      for (int i = 0; i < search; i++) {
+        s = System.currentTimeMillis();
+        TopDocs topDocs = searcher.search(new TermQuery(sample.get(random.nextInt(sample.size()))), 10);
+        total += topDocs.totalHits;
+        e = System.currentTimeMillis();
+        time += (e - s);
+      }
+      System.out.println("Searching " + time + " " + (time / (double) search) + " " + total);
+      for (int i = 0; i < 10; i++) {
+        s = System.currentTimeMillis();
+        TopDocs topDocs = searcher.search(new WildcardQuery(new Term("name", "fff*0*")), 10);
+        e = System.currentTimeMillis();
+        System.out.println(topDocs.totalHits + " " + (e - s));
+      }
+      reader.close();
+    }
+  }
+
+  private static Document getDoc() {
+    Document document = new Document();
+    document.add(new Field("name", UUID.randomUUID().toString(), Store.YES, Index.ANALYZED_NO_NORMS));
+    return document;
+  }
+
+  public static int getNumberOfSlabs(float heapPercentage, int numberOfBlocksPerSlab, int blockSize) {
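+    // Number of slabs of (numberOfBlocksPerSlab * blockSize) bytes that fit within heapPercentage of the max heap.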
+    long max = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax();
+    long targetBytes = (long) (max * heapPercentage);
+    int slabSize = numberOfBlocksPerSlab * blockSize;
+    int slabs = (int) (targetBytes / slabSize);
+    if (slabs == 0) {
+      throw new RuntimeException("Minimum heap size is 512m!");
+    }
+    return slabs;
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/33df9310/src/blur-store/src/test/java/org/apache/blur/store/BenchmarkDirectoryNrt.java
----------------------------------------------------------------------
diff --git a/src/blur-store/src/test/java/org/apache/blur/store/BenchmarkDirectoryNrt.java b/src/blur-store/src/test/java/org/apache/blur/store/BenchmarkDirectoryNrt.java
new file mode 100644
index 0000000..4f1ccdd
--- /dev/null
+++ b/src/blur-store/src/test/java/org/apache/blur/store/BenchmarkDirectoryNrt.java
@@ -0,0 +1,160 @@
+package org.apache.blur.store;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import java.io.IOException;
+import java.lang.management.ManagementFactory;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.UUID;
+import java.util.concurrent.ArrayBlockingQueue;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.blur.metrics.BlurMetrics;
+import org.apache.blur.store.blockcache.BlockCache;
+import org.apache.blur.store.blockcache.BlockDirectory;
+import org.apache.blur.store.blockcache.BlockDirectoryCache;
+import org.apache.blur.store.hdfs.HdfsDirectory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.Field.Index;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.index.CorruptIndexException;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.TieredMergePolicy;
+import org.apache.lucene.store.NoLockFactory;
+
+
+import static org.apache.blur.lucene.LuceneConstant.LUCENE_VERSION;
+
+public class BenchmarkDirectoryNrt {
+
+  public static void main(String[] args) throws IOException, InterruptedException {
+    int blockSize = BlockDirectory.BLOCK_SIZE;
+    long totalMemory = BlockCache._128M * 2;
+    int slabSize = (int) (totalMemory / 2);
+
+    BlockCache blockCache = new BlockCache(new BlurMetrics(new Configuration()), true, totalMemory, slabSize, blockSize);
+    BlurMetrics metrics = new BlurMetrics(new Configuration());
+    BlockDirectoryCache cache = new BlockDirectoryCache(blockCache, metrics);
+
+    Path p = new Path("hdfs://localhost:9000/bench");
+    FileSystem fs = FileSystem.get(p.toUri(), new Configuration());
+    fs.delete(p, true);
+
+    final HdfsDirectory dir = new HdfsDirectory(p);
+    dir.setLockFactory(NoLockFactory.getNoLockFactory());
+
+    BlockDirectory directory = new BlockDirectory("test", dir, cache);
+
+    while (true) {
+      IndexWriterConfig conf = new IndexWriterConfig(LUCENE_VERSION, new StandardAnalyzer(LUCENE_VERSION));
+      TieredMergePolicy mergePolicy = (TieredMergePolicy) conf.getMergePolicy();
+      mergePolicy.setUseCompoundFile(false);
+      int count = 0;
+      int max = 10000;
+      long s = System.currentTimeMillis();
+      IndexWriter writer = new IndexWriter(directory, conf);
+      long as = System.currentTimeMillis();
+      BlockingQueue<Collection<Document>> queue = new ArrayBlockingQueue<Collection<Document>>(1024);
+      Indexer indexer = new Indexer(queue, writer);
+      new Thread(indexer).start();
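+      // Report the interval rate and overall average rate every 'max' documents.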
+      for (int i = 0; i < 1000000; i++) {
+        if (count >= max) {
+          double aseconds = (System.currentTimeMillis() - as) / 1000.0;
+          double arate = i / aseconds;
+          double seconds = (System.currentTimeMillis() - s) / 1000.0;
+          double rate = count / seconds;
+          System.out.println("Total [" + i + "] Rate [" + rate + "] AvgRate [" + arate + "] Doc count [" + indexer.getReader().numDocs() + "]");
+          count = 0;
+          s = System.currentTimeMillis();
+        }
+        queue.put(Arrays.asList(getDoc()));
+        count++;
+      }
+      writer.close();
+    }
+  }
+
+  private static class Indexer implements Runnable {
+
+    private BlockingQueue<Collection<Document>> _queue;
+    private AtomicBoolean _running = new AtomicBoolean(true);
+    private IndexWriter _writer;
+    private IndexReader _reader;
+
+    public Indexer(BlockingQueue<Collection<Document>> queue, IndexWriter writer) throws CorruptIndexException, IOException {
+      _queue = queue;
+      _writer = writer;
+      _reader = IndexReader.open(_writer, true);
+    }
+
+    public IndexReader getReader() {
+      return _reader;
+    }
+
+    @Override
+    public void run() {
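+      // Reopen the near-real-time reader about every 50 ms (cycleTime is in nanoseconds).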
+      long cycleTime = 50000000;
+      long start = System.nanoTime();
+      while (_running.get()) {
+        try {
+          Collection<Document> docs = _queue.take();
+          _writer.addDocuments(docs);
+          if (start + cycleTime < System.nanoTime()) {
+            IndexReader newReader = IndexReader.open(_writer, true);
+            _reader.close();
+            _reader = newReader;
+            start = System.nanoTime();
+          }
+        } catch (InterruptedException e) {
+          return;
+        } catch (CorruptIndexException e) {
+          e.printStackTrace();
+          return;
+        } catch (IOException e) {
+          e.printStackTrace();
+          return;
+        }
+      }
+    }
+  }
+
+  private static Document getDoc() {
+    Document document = new Document();
+    document.add(new Field("name", UUID.randomUUID().toString(), Store.YES, Index.ANALYZED_NO_NORMS));
+    return document;
+  }
+
+  public static int getNumberOfSlabs(float heapPercentage, int numberOfBlocksPerSlab, int blockSize) {
+    long max = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax();
+    long targetBytes = (long) (max * heapPercentage);
+    int slabSize = numberOfBlocksPerSlab * blockSize;
+    int slabs = (int) (targetBytes / slabSize);
+    if (slabs == 0) {
+      throw new RuntimeException("Minimum heap size is 512m!");
+    }
+    return slabs;
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/33df9310/src/blur-store/src/test/java/org/apache/blur/store/HdfsDirectoryTest.java
----------------------------------------------------------------------
diff --git a/src/blur-store/src/test/java/org/apache/blur/store/HdfsDirectoryTest.java b/src/blur-store/src/test/java/org/apache/blur/store/HdfsDirectoryTest.java
new file mode 100644
index 0000000..ef9e4cb
--- /dev/null
+++ b/src/blur-store/src/test/java/org/apache/blur/store/HdfsDirectoryTest.java
@@ -0,0 +1,203 @@
+package org.apache.blur.store;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.File;
+import java.io.IOException;
+import java.net.URI;
+import java.util.HashSet;
+import java.util.Random;
+import java.util.Set;
+
+import org.apache.blur.store.hdfs.HdfsDirectory;
+import org.apache.hadoop.fs.Path;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.store.IndexOutput;
+import org.apache.lucene.store.RAMDirectory;
+import org.junit.Before;
+import org.junit.Test;
+
+
+public class HdfsDirectoryTest {
+
+  private static final int MAX_NUMBER_OF_WRITES = 10000;
+  private static final int MIN_FILE_SIZE = 100;
+  private static final int MAX_FILE_SIZE = 100000;
+  private static final int MIN_BUFFER_SIZE = 1;
+  private static final int MAX_BUFFER_SIZE = 5000;
+  private static final int MAX_NUMBER_OF_READS = 10000;
+  private HdfsDirectory directory;
+  private File file;
+  private long seed;
+  private Random random;
+
+  @Before
+  public void setUp() throws IOException {
+    file = new File("./tmp");
+    rm(file);
+    URI uri = new File(file, "hdfs").toURI();
+    Path hdfsDirPath = new Path(uri.toString());
+    directory = new HdfsDirectory(hdfsDirPath);
+    seed = new Random().nextLong();
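+    // To reproduce a failing run, replace the random seed with the value reported in the failure message: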
+    // seed = 7392202912208392081L;
+    random = new Random(seed);
+  }
+
+  @Test
+  public void testWritingAndReadingAFile() throws IOException {
+
+    IndexOutput output = directory.createOutput("testing.test");
+    output.writeInt(12345);
+    output.flush();
+    output.close();
+
+    IndexInput input = directory.openInput("testing.test");
+    assertEquals(12345, input.readInt());
+    input.close();
+
+    String[] listAll = directory.listAll();
+    assertEquals(1, listAll.length);
+    assertEquals("testing.test", listAll[0]);
+
+    assertEquals(4, directory.fileLength("testing.test"));
+
+    IndexInput input1 = directory.openInput("testing.test");
+
+    IndexInput input2 = (IndexInput) input1.clone();
+    assertEquals(12345, input2.readInt());
+    input2.close();
+
+    assertEquals(12345, input1.readInt());
+    input1.close();
+
+    assertFalse(directory.fileExists("testing.test.other"));
+    assertTrue(directory.fileExists("testing.test"));
+    directory.deleteFile("testing.test");
+    assertFalse(directory.fileExists("testing.test"));
+  }
+
+  @Test
+  public void testEOF() throws IOException {
+    Directory fsDir = new RAMDirectory();
+    String name = "test.eof";
+    createFile(name, fsDir, directory);
+    long fsLength = fsDir.fileLength(name);
+    long hdfsLength = directory.fileLength(name);
+    assertEquals(fsLength, hdfsLength);
+    testEof(name, fsDir, fsLength);
+    testEof(name, directory, hdfsLength);
+  }
+
+  private void testEof(String name, Directory directory, long length) throws IOException {
+    IndexInput input = directory.openInput(name);
+    input.seek(length);
+    try {
+      input.readByte();
+      fail("should throw eof");
+    } catch (IOException e) {
+    }
+  }
+
+  @Test
+  public void testRandomAccessWrites() throws IOException {
+    int i = 0;
+    try {
+      Set<String> names = new HashSet<String>();
+      for (; i < 10; i++) {
+        Directory fsDir = new RAMDirectory();
+        String name = getName();
+        System.out.println("Working on pass [" + i + "] seed [" + seed + "] contains [" + names.contains(name) + "]");
+        names.add(name);
+        createFile(name, fsDir, directory);
+        assertInputsEquals(name, fsDir, directory);
+        fsDir.close();
+      }
+    } catch (Exception e) {
+      e.printStackTrace();
+      fail("Test failed with seed [" + seed + "] on pass [" + i + "]");
+    }
+  }
+
+  private void assertInputsEquals(String name, Directory fsDir, HdfsDirectory hdfs) throws IOException {
+    int reads = random.nextInt(MAX_NUMBER_OF_READS);
+    int buffer = random.nextInt(MAX_BUFFER_SIZE - MIN_BUFFER_SIZE) + MIN_BUFFER_SIZE;
+    IndexInput fsInput = fsDir.openInput(name, buffer);
+    IndexInput hdfsInput = hdfs.openInput(name, buffer);
+    assertEquals(fsInput.length(), hdfsInput.length());
+    int fileLength = (int) fsInput.length();
+    for (int i = 0; i < reads; i++) {
+      byte[] fsBuf = new byte[random.nextInt(Math.min(MAX_BUFFER_SIZE - MIN_BUFFER_SIZE, fileLength)) + MIN_BUFFER_SIZE];
+      byte[] hdfsBuf = new byte[fsBuf.length];
+      int offset = random.nextInt(fsBuf.length);
+      int length = random.nextInt(fsBuf.length - offset);
+      int pos = random.nextInt(fileLength - length);
+      fsInput.seek(pos);
+      fsInput.readBytes(fsBuf, offset, length);
+      hdfsInput.seek(pos);
+      hdfsInput.readBytes(hdfsBuf, offset, length);
+      for (int f = offset; f < offset + length; f++) {
+        if (fsBuf[f] != hdfsBuf[f]) {
+          fail();
+        }
+      }
+    }
+    fsInput.close();
+    hdfsInput.close();
+  }
+
+  private void createFile(String name, Directory fsDir, HdfsDirectory hdfs) throws IOException {
+    int writes = random.nextInt(MAX_NUMBER_OF_WRITES);
+    int fileLength = random.nextInt(MAX_FILE_SIZE - MIN_FILE_SIZE) + MIN_FILE_SIZE;
+    IndexOutput fsOutput = fsDir.createOutput(name);
+    fsOutput.setLength(fileLength);
+    IndexOutput hdfsOutput = hdfs.createOutput(name);
+    hdfsOutput.setLength(fileLength);
+    for (int i = 0; i < writes; i++) {
+      byte[] buf = new byte[random.nextInt(Math.min(MAX_BUFFER_SIZE - MIN_BUFFER_SIZE, fileLength)) + MIN_BUFFER_SIZE];
+      random.nextBytes(buf);
+      int offset = random.nextInt(buf.length);
+      int length = random.nextInt(buf.length - offset);
+      fsOutput.writeBytes(buf, offset, length);
+      hdfsOutput.writeBytes(buf, offset, length);
+    }
+    fsOutput.close();
+    hdfsOutput.close();
+  }
+
+  private String getName() {
+    return Long.toString(Math.abs(random.nextLong()));
+  }
+
+  public static void rm(File file) {
+    if (!file.exists()) {
+      return;
+    }
+    if (file.isDirectory()) {
+      for (File f : file.listFiles()) {
+        rm(f);
+      }
+    }
+    file.delete();
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/33df9310/src/blur-store/src/test/java/org/apache/blur/store/UsingHdfsDir.java
----------------------------------------------------------------------
diff --git a/src/blur-store/src/test/java/org/apache/blur/store/UsingHdfsDir.java b/src/blur-store/src/test/java/org/apache/blur/store/UsingHdfsDir.java
new file mode 100644
index 0000000..302a7a1
--- /dev/null
+++ b/src/blur-store/src/test/java/org/apache/blur/store/UsingHdfsDir.java
@@ -0,0 +1,112 @@
+package org.apache.blur.store;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import static org.apache.blur.lucene.LuceneConstant.LUCENE_VERSION;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.List;
+import java.util.UUID;
+
+import org.apache.blur.store.hdfs.HdfsDirectory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.Field.Index;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermEnum;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.store.NoLockFactory;
+
+
+public class UsingHdfsDir {
+
+  public static void main(String[] args) throws IOException {
+
+    // FileSystem fs = FileSystem.getLocal(new Configuration());
+    // Path p = new Path("file:///tmp/testdir");
+
+    Path p = new Path("hdfs://localhost:9000/test-dir");
+    FileSystem fs = FileSystem.get(p.toUri(), new Configuration());
+    fs.delete(p, true);
+
+    final HdfsDirectory directory = new HdfsDirectory(p);
+    directory.setLockFactory(NoLockFactory.getNoLockFactory());
+
+    IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(LUCENE_VERSION, new StandardAnalyzer(LUCENE_VERSION)));
+    for (int i = 0; i < 100000; i++) {
+      writer.addDocument(getDoc());
+    }
+    writer.close();
+
+    IndexReader reader = IndexReader.open(directory);
+    TermEnum terms = reader.terms();
+    while (terms.next()) {
+      System.out.println(terms.term());
+    }
+    terms.close();
+
+    IndexSearcher searcher = new IndexSearcher(reader);
+    TopDocs topDocs = searcher.search(new TermQuery(new Term("name", "ffff")), 10);
+    System.out.println(topDocs.totalHits);
+
+    reader.close();
+
+    List<String> files = new ArrayList<String>(Arrays.asList(directory.listAll()));
+    Collections.sort(files, new Comparator<String>() {
+      @Override
+      public int compare(String o1, String o2) {
+        try {
+          long fileLength1 = directory.fileLength(o1);
+          long fileLength2 = directory.fileLength(o2);
+          if (fileLength1 == fileLength2) {
+            return o1.compareTo(o2);
+          }
+          return (int) (fileLength2 - fileLength1);
+        } catch (IOException e) {
+          throw new RuntimeException(e);
+        }
+      }
+    });
+
+    for (String file : files) {
+      System.out.println(file + " " + directory.fileLength(file));
+    }
+
+    directory.close();
+  }
+
+  private static Document getDoc() {
+    Document document = new Document();
+    document.add(new Field("name", UUID.randomUUID().toString(), Store.YES, Index.ANALYZED_NO_NORMS));
+    return document;
+  }
+
+}

