incubator-blur-commits mailing list archives

From amccu...@apache.org
Subject [25/28] Initial commit of the back port. The blur-util and blur-store modules have been completed. A new distribution project has been added to help with the building of the project, and all of the pom files have been updated to the new version. This is very much a work in progress.
Date Mon, 18 Mar 2013 01:10:31 GMT
http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/a4601422/src/blur-store/src/main/java/org/apache/blur/store/hdfs/ChangeFileExt.java
----------------------------------------------------------------------
diff --git a/src/blur-store/src/main/java/org/apache/blur/store/hdfs/ChangeFileExt.java b/src/blur-store/src/main/java/org/apache/blur/store/hdfs/ChangeFileExt.java
deleted file mode 100644
index 52f7e02..0000000
--- a/src/blur-store/src/main/java/org/apache/blur/store/hdfs/ChangeFileExt.java
+++ /dev/null
@@ -1,38 +0,0 @@
-package org.apache.blur.store.hdfs;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-
-public class ChangeFileExt {
-
-  public static void main(String[] args) throws IOException {
-    Path p = new Path(args[0]);
-    FileSystem fileSystem = FileSystem.get(p.toUri(), new Configuration());
-    FileStatus[] listStatus = fileSystem.listStatus(p);
-    for (FileStatus fileStatus : listStatus) {
-      Path path = fileStatus.getPath();
-      fileSystem.rename(path, new Path(path.toString() + ".lf"));
-    }
-  }
-
-}

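For reference, the removed ChangeFileExt migration tool took a single directory argument and renamed every file directly under it to carry the ".lf" (layered file) extension. A minimal invocation sketch; the HDFS path below is an illustrative assumption, not taken from the commit:

    import org.apache.blur.store.hdfs.ChangeFileExt;

    public class ChangeFileExtExample {
      public static void main(String[] args) throws Exception {
        // Renames every file directly under the directory to <name>.lf.
        // The path is an assumed example.
        ChangeFileExt.main(new String[] { "hdfs://namenode:8020/blur/tables/table1/shard-0" });
      }
    }
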
http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/a4601422/src/blur-store/src/main/java/org/apache/blur/store/hdfs/ConvertDirectory.java
----------------------------------------------------------------------
diff --git a/src/blur-store/src/main/java/org/apache/blur/store/hdfs/ConvertDirectory.java b/src/blur-store/src/main/java/org/apache/blur/store/hdfs/ConvertDirectory.java
deleted file mode 100644
index 2763ba4..0000000
--- a/src/blur-store/src/main/java/org/apache/blur/store/hdfs/ConvertDirectory.java
+++ /dev/null
@@ -1,62 +0,0 @@
-package org.apache.blur.store.hdfs;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.lucene.index.CorruptIndexException;
-import org.apache.lucene.store.LockObtainFailedException;
-
-public class ConvertDirectory {
-
-  public static void main(String[] args) throws CorruptIndexException, LockObtainFailedException, IOException {
-    Path path = new Path(args[0]);
-    convert(path);
-  }
-
-  public static void convert(Path path) throws IOException {
-    FileSystem fileSystem = FileSystem.get(path.toUri(), new Configuration());
-    if (!fileSystem.exists(path)) {
-      System.out.println(path + " does not exist.");
-      return;
-    }
-    FileStatus fileStatus = fileSystem.getFileStatus(path);
-    if (fileStatus.isDir()) {
-      FileStatus[] listStatus = fileSystem.listStatus(path);
-      for (FileStatus status : listStatus) {
-        convert(status.getPath());
-      }
-    } else {
-      System.out.println("Converting file [" + path + "]");
-      HdfsMetaBlock block = new HdfsMetaBlock();
-      block.realPosition = 0;
-      block.logicalPosition = 0;
-      block.length = fileStatus.getLen();
-      FSDataOutputStream outputStream = fileSystem.append(path);
-      block.write(outputStream);
-      outputStream.writeInt(1);
-      outputStream.writeLong(fileStatus.getLen());
-      outputStream.writeInt(HdfsFileWriter.VERSION);
-      outputStream.close();
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/a4601422/src/blur-store/src/main/java/org/apache/blur/store/hdfs/CopyFromHdfsLocal.java
----------------------------------------------------------------------
diff --git a/src/blur-store/src/main/java/org/apache/blur/store/hdfs/CopyFromHdfsLocal.java b/src/blur-store/src/main/java/org/apache/blur/store/hdfs/CopyFromHdfsLocal.java
deleted file mode 100644
index 1144126..0000000
--- a/src/blur-store/src/main/java/org/apache/blur/store/hdfs/CopyFromHdfsLocal.java
+++ /dev/null
@@ -1,48 +0,0 @@
-package org.apache.blur.store.hdfs;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-import java.io.File;
-import java.io.IOException;
-
-import org.apache.blur.store.compressed.CompressedFieldDataDirectory;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.compress.DefaultCodec;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.FSDirectory;
-
-
-public class CopyFromHdfsLocal {
-
-  public static void main(String[] args) throws IOException {
-    Path path = new Path(args[0]);
-    HdfsDirectory src = new HdfsDirectory(path);
-
-    for (String name : src.listAll()) {
-      System.out.println(name);
-    }
-
-    CompressedFieldDataDirectory compressedDirectory = new CompressedFieldDataDirectory(src, new DefaultCodec(), 32768);
-    Directory dest = FSDirectory.open(new File(args[1]));
-
-    for (String name : compressedDirectory.listAll()) {
-      compressedDirectory.copy(dest, name, name);
-    }
-
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/a4601422/src/blur-store/src/main/java/org/apache/blur/store/hdfs/HdfsDirectory.java
----------------------------------------------------------------------
diff --git a/src/blur-store/src/main/java/org/apache/blur/store/hdfs/HdfsDirectory.java b/src/blur-store/src/main/java/org/apache/blur/store/hdfs/HdfsDirectory.java
index e3bc7ca..0e92867 100644
--- a/src/blur-store/src/main/java/org/apache/blur/store/hdfs/HdfsDirectory.java
+++ b/src/blur-store/src/main/java/org/apache/blur/store/hdfs/HdfsDirectory.java
@@ -16,362 +16,323 @@ package org.apache.blur.store.hdfs;
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
+import static org.apache.blur.metrics.MetricsConstants.HDFS;
+import static org.apache.blur.metrics.MetricsConstants.ORG_APACHE_BLUR;
+
 import java.io.FileNotFoundException;
 import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.atomic.AtomicReference;
-
-import org.apache.blur.store.CustomBufferedIndexInput;
+import java.net.URI;
+import java.util.Collection;
+import java.util.Map;
+import java.util.WeakHashMap;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+
+import org.apache.blur.log.Log;
+import org.apache.blur.log.LogFactory;
+import org.apache.blur.store.blockcache.BlockDirectory;
+import org.apache.blur.store.buffer.ReusedBufferedIndexInput;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream;
+import org.apache.hadoop.fs.PathFilter;
+import org.apache.lucene.store.BufferedIndexOutput;
 import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.store.IndexOutput;
+import org.apache.lucene.store.NoLockFactory;
 
-
+import com.yammer.metrics.Metrics;
+import com.yammer.metrics.core.Histogram;
+import com.yammer.metrics.core.Meter;
+import com.yammer.metrics.core.MetricName;
 public class HdfsDirectory extends Directory {
 
-  public static final int BUFFER_SIZE = 8192;
-
-  private static final String LF_EXT = ".lf";
-  protected static final String SEGMENTS_GEN = "segments.gen";
-  protected static final IndexOutput NULL_WRITER = new NullIndexOutput();
-  protected Path _hdfsDirPath;
-  protected AtomicReference<FileSystem> _fileSystemRef = new AtomicReference<FileSystem>();
-  protected Configuration _configuration;
-
-  public HdfsDirectory(Path hdfsDirPath) throws IOException {
-    _hdfsDirPath = hdfsDirPath;
-    _configuration = new Configuration();
-    reopenFileSystem();
-    try {
-      if (!getFileSystem().exists(hdfsDirPath)) {
-        getFileSystem().mkdirs(hdfsDirPath);
+  
+
+  private static final Log LOG = LogFactory.getLog(HdfsDirectory.class);
+
+  private static AtomicLong deleteCounter = new AtomicLong();
+  private static AtomicLong existsCounter = new AtomicLong();
+  private static AtomicLong fileStatusCounter = new AtomicLong();
+  private static AtomicLong renameCounter = new AtomicLong();
+  private static AtomicLong listCounter = new AtomicLong();
+  private static AtomicLong createCounter = new AtomicLong();
+  private static AtomicLong isFileCounter = new AtomicLong();
+
+  static {
+    Thread thread = new Thread(new Runnable() {
+      @Override
+      public void run() {
+        while (true) {
+          LOG.debug("Delete Counter [" + deleteCounter + "]");
+          LOG.debug("Exists Counter [" + existsCounter + "]");
+          LOG.debug("File Status Counter [" + fileStatusCounter + "]");
+          LOG.debug("Rename Counter [" + renameCounter + "]");
+          LOG.debug("List Counter [" + listCounter + "]");
+          LOG.debug("Create Counter [" + createCounter + "]");
+          LOG.debug("IsFile Counter [" + isFileCounter + "]");
+          try {
+            Thread.sleep(5000);
+          } catch (InterruptedException e) {
+            return;
+          }
+        }
       }
-    } catch (IOException e) {
-      throw new RuntimeException(e);
-    }
-  }
-
-  @Override
-  public void close() throws IOException {
-
-  }
-
-  @Override
-  public IndexOutput createOutput(String name) throws IOException {
-    if (SEGMENTS_GEN.equals(name)) {
-      return NULL_WRITER;
-    }
-    name = getRealName(name);
-    HdfsFileWriter writer = new HdfsFileWriter(getFileSystem(), new Path(_hdfsDirPath, name));
-    return new HdfsLayeredIndexOutput(writer);
-  }
-
-  private String getRealName(String name) throws IOException {
-    if (getFileSystem().exists(new Path(_hdfsDirPath, name))) {
-      return name;
-    }
-    return name + LF_EXT;
-  }
-
-  private String[] getNormalNames(List<String> files) {
-    int size = files.size();
-    for (int i = 0; i < size; i++) {
-      String str = files.get(i);
-      files.set(i, toNormalName(str));
-    }
-    return files.toArray(new String[] {});
-  }
-
-  private String toNormalName(String name) {
-    if (name.endsWith(LF_EXT)) {
-      return name.substring(0, name.length() - 3);
-    }
-    return name;
-  }
-
-  @Override
-  public IndexInput openInput(String name) throws IOException {
-    return openInput(name, BUFFER_SIZE);
-  }
-
-  @Override
-  public IndexInput openInput(String name, int bufferSize) throws IOException {
-    name = getRealName(name);
-    if (isLayeredFile(name)) {
-      HdfsFileReader reader = new HdfsFileReader(getFileSystem(), new Path(_hdfsDirPath, name), BUFFER_SIZE);
-      return new HdfsLayeredIndexInput(name, reader, BUFFER_SIZE);
-    } else {
-      return new HdfsNormalIndexInput(name, getFileSystem(), new Path(_hdfsDirPath, name), BUFFER_SIZE);
-    }
-  }
-
-  private boolean isLayeredFile(String name) {
-    if (name.endsWith(LF_EXT)) {
-      return true;
-    }
-    return false;
-  }
-
-  @Override
-  public void deleteFile(String name) throws IOException {
-    name = getRealName(name);
-    if (!fileExists(name)) {
-      throw new FileNotFoundException(name);
-    }
-    getFileSystem().delete(new Path(_hdfsDirPath, name), false);
+    });
+    thread.setName("HDFS dir counter logger");
+    thread.setDaemon(true);
+    thread.start();
   }
 
-  @Override
-  public boolean fileExists(String name) throws IOException {
-    name = getRealName(name);
-    return getFileSystem().exists(new Path(_hdfsDirPath, name));
-  }
+  private final Path path;
+  private final FileSystem fileSystem;
+  private final MetricsGroup metricsGroup;
 
-  @Override
-  public long fileLength(String name) throws IOException {
-    name = getRealName(name);
-    if (!fileExists(name)) {
-      throw new FileNotFoundException(name);
-    }
-    return HdfsFileReader.getLength(getFileSystem(), new Path(_hdfsDirPath, name));
-  }
+  static class MetricsGroup {
+    final Histogram readAccess;
+    final Histogram writeAccess;
+    final Meter writeThroughput;
+    final Meter readThroughput;
 
-  @Override
-  public long fileModified(String name) throws IOException {
-    name = getRealName(name);
-    if (!fileExists(name)) {
-      throw new FileNotFoundException(name);
+    MetricsGroup(Histogram readAccess, Histogram writeAccess, Meter readThroughput, Meter writeThroughput) {
+      this.readAccess = readAccess;
+      this.writeAccess = writeAccess;
+      this.readThroughput = readThroughput;
+      this.writeThroughput = writeThroughput;
     }
-    FileStatus fileStatus = getFileSystem().getFileStatus(new Path(_hdfsDirPath, name));
-    return fileStatus.getModificationTime();
   }
-
-  @Override
-  public String[] listAll() throws IOException {
-    FileStatus[] listStatus = getFileSystem().listStatus(_hdfsDirPath);
-    List<String> files = new ArrayList<String>();
-    if (listStatus == null) {
-      return new String[] {};
-    }
-    for (FileStatus status : listStatus) {
-      if (!status.isDir()) {
-        files.add(status.getPath().getName());
+  
+  /**
+   * We keep the metrics separate per filesystem.
+   */
+  private static Map<URI, MetricsGroup> metricsGroupMap = new WeakHashMap<URI, MetricsGroup>();
+
+  public HdfsDirectory(Configuration configuration, Path path) throws IOException {
+    this.path = path;
+    fileSystem = path.getFileSystem(configuration);
+    setLockFactory(NoLockFactory.getNoLockFactory());
+    synchronized (metricsGroupMap) {
+      URI uri = fileSystem.getUri();
+      MetricsGroup metricsGroup = metricsGroupMap.get(uri);
+      if (metricsGroup == null) {
+        String scope = uri.toString();
+        
+        Histogram readAccess = Metrics.newHistogram(new MetricName(ORG_APACHE_BLUR, HDFS, "Read Latency in \u00B5s", scope));
+        Histogram writeAccess = Metrics.newHistogram(new MetricName(ORG_APACHE_BLUR, HDFS, "Write Latency in \u00B5s", scope));
+        Meter readThroughput = Metrics.newMeter(new MetricName(ORG_APACHE_BLUR, HDFS, "Read Throughput", scope), "Read Bytes", TimeUnit.SECONDS);
+        Meter writeThroughput = Metrics.newMeter(new MetricName(ORG_APACHE_BLUR, HDFS, "Write Throughput", scope), "Write Bytes", TimeUnit.SECONDS);
+        metricsGroup = new MetricsGroup(readAccess, writeAccess, readThroughput, writeThroughput);
+        metricsGroupMap.put(uri, metricsGroup);
       }
+      this.metricsGroup = metricsGroup;
     }
-    return getNormalNames(files);
   }
 
   @Override
-  public void touchFile(String name) throws IOException {
-    // do nothing
-  }
-
-  public Path getHdfsDirPath() {
-    return _hdfsDirPath;
-  }
-
-  public FileSystem getFileSystem() {
-    return _fileSystemRef.get();
-  }
-
-  protected void reopenFileSystem() throws IOException {
-    FileSystem fileSystem = FileSystem.get(_hdfsDirPath.toUri(), _configuration);
-    FileSystem oldFs = _fileSystemRef.get();
-    _fileSystemRef.set(fileSystem);
-    if (oldFs != null) {
-      oldFs.close();
-    }
+  public String toString() {
+    return "HdfsDirectory path=[" + path + "]";
   }
 
-  static class HdfsLayeredIndexInput extends CustomBufferedIndexInput {
+  public static class HdfsIndexInput extends ReusedBufferedIndexInput {
 
-    private HdfsFileReader _reader;
-    private long _length;
+    private final long len;
+    private FSDataInputStream inputStream;
     private boolean isClone;
+    private final MetricsGroup metricsGroup;
 
-    public HdfsLayeredIndexInput(String name, HdfsFileReader reader, int bufferSize) {
-      super(name, bufferSize);
-      _reader = reader;
-      _length = _reader.length();
-    }
-
-    @Override
-    protected void closeInternal() throws IOException {
-      if (!isClone) {
-        _reader.close();
-      }
+    public HdfsIndexInput(FileSystem fileSystem, Path filePath, MetricsGroup metricsGroup) throws IOException {
+      super(filePath.toString());
+      inputStream = fileSystem.open(filePath);
+      FileStatus fileStatus = fileSystem.getFileStatus(filePath);
+      len = fileStatus.getLen();
+      this.metricsGroup = metricsGroup;
     }
 
     @Override
     public long length() {
-      return _length;
-    }
-
-    @Override
-    public Object clone() {
-      HdfsLayeredIndexInput input = (HdfsLayeredIndexInput) super.clone();
-      input.isClone = true;
-      input._reader = (HdfsFileReader) _reader.clone();
-      return input;
-    }
-
-    @Override
-    protected void readInternal(byte[] b, int offset, int length) throws IOException {
-      long position = getFilePointer();
-      _reader.seek(position);
-      _reader.readBytes(b, offset, length);
+      return len;
     }
 
     @Override
     protected void seekInternal(long pos) throws IOException {
-      // do nothing
-    }
-  }
-
-  static class HdfsNormalIndexInput extends CustomBufferedIndexInput {
 
-    private final FSDataInputStream _inputStream;
-    private final long _length;
-    private boolean _clone = false;
-
-    public HdfsNormalIndexInput(String name, FileSystem fileSystem, Path path, int bufferSize) throws IOException {
-      super(name);
-      FileStatus fileStatus = fileSystem.getFileStatus(path);
-      _length = fileStatus.getLen();
-      _inputStream = fileSystem.open(path, bufferSize);
     }
 
     @Override
     protected void readInternal(byte[] b, int offset, int length) throws IOException {
-      _inputStream.read(getFilePointer(), b, offset, length);
-    }
-
-    @Override
-    protected void seekInternal(long pos) throws IOException {
-
+      synchronized (inputStream) {
+        long start = System.nanoTime();
+        inputStream.seek(getFilePointer());
+        inputStream.readFully(b, offset, length);
+        long end = System.nanoTime();
+        metricsGroup.readAccess.update((end - start) / 1000);
+        metricsGroup.readThroughput.mark(length);
+      }
     }
 
     @Override
     protected void closeInternal() throws IOException {
-      if (!_clone) {
-        _inputStream.close();
+      if (!isClone) {
+        inputStream.close();
       }
     }
 
     @Override
-    public long length() {
-      return _length;
-    }
-
-    @Override
-    public Object clone() {
-      HdfsNormalIndexInput clone = (HdfsNormalIndexInput) super.clone();
-      clone._clone = true;
+    public ReusedBufferedIndexInput clone() {
+      HdfsIndexInput clone = (HdfsIndexInput) super.clone();
+      clone.isClone = true;
       return clone;
     }
   }
 
-  static class HdfsLayeredIndexOutput extends IndexOutput {
-
-    private HdfsFileWriter _writer;
-
-    public HdfsLayeredIndexOutput(HdfsFileWriter writer) {
-      _writer = writer;
+  @Override
+  public IndexOutput createOutput(String name, IOContext context) throws IOException {
+    if (fileExists(name)) {
+      throw new IOException("File [" + name + "] already exists.");
     }
+    final FSDataOutputStream outputStream = fileSystem.create(getPath(name));
+    createCounter.incrementAndGet();
+    return new BufferedIndexOutput() {
 
-    @Override
-    public void close() throws IOException {
-      _writer.close();
-    }
+      @Override
+      public long length() throws IOException {
+        return outputStream.getPos();
+      }
 
-    @Override
-    public void flush() throws IOException {
+      @Override
+      protected void flushBuffer(byte[] b, int offset, int len) throws IOException {
+        long start = System.nanoTime();
+        outputStream.write(b, offset, len);
+        long end = System.nanoTime();
+        metricsGroup.writeAccess.update((end - start) / 1000);
+        metricsGroup.writeThroughput.mark(len);
+      }
 
-    }
+      @Override
+      public void close() throws IOException {
+        super.close();
+        outputStream.close();
+      }
 
-    @Override
-    public long getFilePointer() {
-      return _writer.getPosition();
-    }
+      @Override
+      public void seek(long pos) throws IOException {
+        throw new IOException("seeks not allowed on IndexOutputs.");
+      }
+    };
+  }
 
-    @Override
-    public long length() throws IOException {
-      return _writer.length();
+  @Override
+  public IndexInput openInput(String name, IOContext context) throws IOException {
+    if (!fileExists(name)) {
+      throw new FileNotFoundException("File [" + name + "] not found.");
     }
+    Path filePath = getPath(name);
+    return new HdfsIndexInput(fileSystem, filePath, metricsGroup);
+  }
 
-    @Override
-    public void seek(long pos) throws IOException {
-      _writer.seek(pos);
+  @Override
+  public String[] listAll() throws IOException {
+    listCounter.incrementAndGet();
+    FileStatus[] files = fileSystem.listStatus(path, new PathFilter() {
+      @Override
+      public boolean accept(Path path) {
+        try {
+          isFileCounter.incrementAndGet();
+          return fileSystem.isFile(path);
+        } catch (IOException e) {
+          throw new RuntimeException(e);
+        }
+      }
+    });
+    String[] result = new String[files.length];
+    for (int i = 0; i < result.length; i++) {
+      result[i] = files[i].getPath().getName();
     }
+    return result;
+  }
 
-    @Override
-    public void writeByte(byte b) throws IOException {
-      _writer.writeByte(b);
-    }
+  @Override
+  public boolean fileExists(String name) throws IOException {
+    existsCounter.incrementAndGet();
+    return fileSystem.exists(getPath(name));
+  }
 
-    @Override
-    public void writeBytes(byte[] b, int offset, int length) throws IOException {
-      _writer.writeBytes(b, offset, length);
+  @Override
+  public void deleteFile(String name) throws IOException {
+    if (fileExists(name)) {
+      deleteCounter.incrementAndGet();
+      fileSystem.delete(getPath(name), true);
+    } else {
+      throw new FileNotFoundException("File [" + name + "] not found");
     }
   }
 
-  static class DirectIOHdfsIndexInput extends CustomBufferedIndexInput {
-
-    private long _length;
-    private FSDataInputStream _inputStream;
-    private boolean isClone;
+  @Override
+  public long fileLength(String name) throws IOException {
+    fileStatusCounter.incrementAndGet();
+    FileStatus fileStatus = fileSystem.getFileStatus(getPath(name));
+    return fileStatus.getLen();
+  }
 
-    public DirectIOHdfsIndexInput(String name, FSDataInputStream inputStream, long length) throws IOException {
-      super(name);
-      if (inputStream instanceof DFSDataInputStream) {
-        // This is needed because if the file was in the process of being
-        // written but was not closed, the length of the file is 0. This
-        // will fetch the synced length of the file.
-        _length = ((DFSDataInputStream) inputStream).getVisibleLength();
-      } else {
-        _length = length;
-      }
-      _inputStream = inputStream;
-    }
+  @Override
+  public void sync(Collection<String> names) throws IOException {
 
-    @Override
-    public long length() {
-      return _length;
-    }
+  }
 
-    @Override
-    protected void closeInternal() throws IOException {
-      if (!isClone) {
-        _inputStream.close();
-      }
-    }
+  @Override
+  public void close() throws IOException {
+    fileSystem.close();
+  }
 
-    @Override
-    protected void seekInternal(long pos) throws IOException {
+  private Path getPath(String name) {
+    return new Path(path, name);
+  }
 
+  public long getFileModified(String name) throws IOException {
+    if (!fileExists(name)) {
+      throw new FileNotFoundException("File [" + name + "] not found");
     }
+    Path file = getPath(name);
+    fileStatusCounter.incrementAndGet();
+    return fileSystem.getFileStatus(file).getModificationTime();
+  }
 
-    @Override
-    protected void readInternal(byte[] b, int offset, int length) throws IOException {
-      synchronized (_inputStream) {
-        _inputStream.readFully(getFilePointer(), b, offset, length);
+  @Override
+  public void copy(Directory to, String src, String dest, IOContext context) throws IOException {
+    if (to instanceof HdfsDirectory) {
+      if (quickMove(to, src, dest, context)) {
+        return;
+      }
+    } else if (to instanceof BlockDirectory) {
+      BlockDirectory bd = (BlockDirectory) to;
+      Directory inner = bd.getDirectory();
+      if (quickMove(inner, src, dest, context)) {
+        return;
       }
     }
+    super.copy(to, src, dest, context);
+  }
 
-    @Override
-    public Object clone() {
-      DirectIOHdfsIndexInput clone = (DirectIOHdfsIndexInput) super.clone();
-      clone.isClone = true;
-      return clone;
+  private boolean quickMove(Directory to, String src, String dest, IOContext context) throws IOException {
+    HdfsDirectory simpleTo = (HdfsDirectory) to;
+    if (ifSameCluster(to, this)) {
+      Path newDest = simpleTo.getPath(dest);
+      Path oldSrc = getPath(src);
+      renameCounter.incrementAndGet();
+      return fileSystem.rename(oldSrc, newDest);
     }
+    return false;
   }
+
+  private boolean ifSameCluster(Directory to, HdfsDirectory simpleHDFSDirectory) {
+    // @TODO
+    return true;
+  }
+
 }

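The reworked HdfsDirectory now takes the Hadoop Configuration explicitly, disables locking via NoLockFactory, and registers one MetricsGroup per filesystem URI so read/write latency and throughput are tracked per cluster. A minimal usage sketch against the new constructor; the path is an illustrative assumption:

    import org.apache.blur.store.hdfs.HdfsDirectory;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;

    public class HdfsDirectoryUsage {
      public static void main(String[] args) throws Exception {
        // The URI is an assumed example; any Hadoop-resolvable path works.
        Path path = new Path("hdfs://namenode:8020/blur/tables/table1/shard-0");
        HdfsDirectory dir = new HdfsDirectory(new Configuration(), path);
        try {
          // listAll() returns only files; directories are filtered by the PathFilter.
          for (String name : dir.listAll()) {
            System.out.println(name + " [" + dir.fileLength(name) + " bytes]");
          }
        } finally {
          dir.close(); // closes the underlying FileSystem
        }
      }
    }
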
http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/a4601422/src/blur-store/src/main/java/org/apache/blur/store/hdfs/HdfsFileReader.java
----------------------------------------------------------------------
diff --git a/src/blur-store/src/main/java/org/apache/blur/store/hdfs/HdfsFileReader.java b/src/blur-store/src/main/java/org/apache/blur/store/hdfs/HdfsFileReader.java
deleted file mode 100644
index 35ecaf8..0000000
--- a/src/blur-store/src/main/java/org/apache/blur/store/hdfs/HdfsFileReader.java
+++ /dev/null
@@ -1,188 +0,0 @@
-package org.apache.blur.store.hdfs;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.blur.log.Log;
-import org.apache.blur.log.LogFactory;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.lucene.store.DataInput;
-
-
-public class HdfsFileReader extends DataInput {
-
-  private static final Log LOG = LogFactory.getLog(HdfsFileReader.class);
-
-  private static final int VERSION = -1;
-
-  private final long _length;
-  private final long _hdfsLength;
-  private final List<HdfsMetaBlock> _metaBlocks;
-  private FSDataInputStream _inputStream;
-  private long _logicalPos;
-  private long _boundary;
-  private long _realPos;
-  private boolean isClone;
-
-  public HdfsFileReader(FileSystem fileSystem, Path path, int bufferSize) throws IOException {
-    if (!fileSystem.exists(path)) {
-      throw new FileNotFoundException(path.toString());
-    }
-    FileStatus fileStatus = fileSystem.getFileStatus(path);
-    _hdfsLength = fileStatus.getLen();
-    _inputStream = fileSystem.open(path, bufferSize);
-
-    // read meta blocks
-    _inputStream.seek(_hdfsLength - 16);
-    int numberOfBlocks = _inputStream.readInt();
-    _length = _inputStream.readLong();
-    int version = _inputStream.readInt();
-    if (version != VERSION) {
-      throw new RuntimeException("Version of file [" + version + "] does not match reader [" + VERSION + "]");
-    }
-    _inputStream.seek(_hdfsLength - 16 - (numberOfBlocks * 24)); // 3 longs per block
-    _metaBlocks = new ArrayList<HdfsMetaBlock>(numberOfBlocks);
-    for (int i = 0; i < numberOfBlocks; i++) {
-      HdfsMetaBlock hdfsMetaBlock = new HdfsMetaBlock();
-      hdfsMetaBlock.readFields(_inputStream);
-      _metaBlocks.add(hdfsMetaBlock);
-    }
-    seek(0);
-  }
-
-  public HdfsFileReader(FileSystem fileSystem, Path path) throws IOException {
-    this(fileSystem, path, HdfsDirectory.BUFFER_SIZE);
-  }
-
-  public long getPosition() {
-    return _logicalPos;
-  }
-
-  public long length() {
-    return _length;
-  }
-
-  public void seek(long pos) throws IOException {
-    if (_logicalPos == pos) {
-      return;
-    }
-    _logicalPos = pos;
-    seekInternal();
-  }
-
-  public void close() throws IOException {
-    if (!isClone) {
-      _inputStream.close();
-    }
-  }
-
-  /**
-   * This method should never be used!
-   */
-  @Override
-  public byte readByte() throws IOException {
-    LOG.warn("Should not be used!");
-    byte[] buf = new byte[1];
-    readBytes(buf, 0, 1);
-    return buf[0];
-  }
-
-  @Override
-  public void readBytes(byte[] b, int offset, int len) throws IOException {
-    checkBoundary();
-    // might need to read in multiple stages
-    while (len > 0) {
-      if (_logicalPos >= _boundary) {
-        seekInternal();
-      }
-      int lengthToRead = (int) Math.min(_boundary - _logicalPos, len);
-      _inputStream.read(_realPos, b, offset, lengthToRead);
-      offset += lengthToRead;
-      _logicalPos += lengthToRead;
-      _realPos += lengthToRead;
-      len -= lengthToRead;
-    }
-  }
-
-  private void checkBoundary() throws IOException {
-    if (_boundary == -1l) {
-      throw new IOException("eof");
-    }
-  }
-
-  private void seekInternal() throws IOException {
-    HdfsMetaBlock block = null;
-    for (HdfsMetaBlock b : _metaBlocks) {
-      if (b.containsDataAt(_logicalPos)) {
-        block = b;
-      }
-    }
-    if (block == null) {
-      _boundary = -1l;
-      _realPos = -1l;
-    } else {
-      _realPos = block.getRealPosition(_logicalPos);
-      _boundary = getBoundary(block);
-    }
-  }
-
-  private long getBoundary(HdfsMetaBlock block) {
-    _boundary = block.logicalPosition + block.length;
-    for (HdfsMetaBlock b : _metaBlocks) {
-      if (b.logicalPosition > block.logicalPosition && b.logicalPosition < _boundary && b.logicalPosition >= _logicalPos) {
-        _boundary = b.logicalPosition;
-      }
-    }
-    return _boundary;
-  }
-
-  public static long getLength(FileSystem fileSystem, Path path) throws IOException {
-    FSDataInputStream inputStream = null;
-    try {
-      FileStatus fileStatus = fileSystem.getFileStatus(path);
-      inputStream = fileSystem.open(path);
-      long hdfsLength = fileStatus.getLen();
-      inputStream.seek(hdfsLength - 12);
-      long length = inputStream.readLong();
-      int version = inputStream.readInt();
-      if (version != VERSION) {
-        throw new RuntimeException("Version of file [" + version + "] does not match reader [" + VERSION + "]");
-      }
-      return length;
-    } finally {
-      if (inputStream != null) {
-        inputStream.close();
-      }
-    }
-  }
-
-  @Override
-  public Object clone() {
-    HdfsFileReader reader = (HdfsFileReader) super.clone();
-    reader.isClone = true;
-    return reader;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/a4601422/src/blur-store/src/main/java/org/apache/blur/store/hdfs/HdfsFileWriter.java
----------------------------------------------------------------------
diff --git a/src/blur-store/src/main/java/org/apache/blur/store/hdfs/HdfsFileWriter.java b/src/blur-store/src/main/java/org/apache/blur/store/hdfs/HdfsFileWriter.java
deleted file mode 100644
index 1c23f60..0000000
--- a/src/blur-store/src/main/java/org/apache/blur/store/hdfs/HdfsFileWriter.java
+++ /dev/null
@@ -1,99 +0,0 @@
-package org.apache.blur.store.hdfs;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.lucene.store.DataOutput;
-
-public class HdfsFileWriter extends DataOutput {
-
-  public static final int VERSION = -1;
-
-  private FSDataOutputStream _outputStream;
-  private HdfsMetaBlock _block;
-  private List<HdfsMetaBlock> _blocks = new ArrayList<HdfsMetaBlock>();
-  private long _length;
-  private long _currentPosition;
-
-  public HdfsFileWriter(FileSystem fileSystem, Path path) throws IOException {
-    _outputStream = fileSystem.create(path);
-    seek(0);
-  }
-
-  public long length() {
-    return _length;
-  }
-
-  public void seek(long pos) throws IOException {
-    if (_block != null) {
-      _blocks.add(_block);
-    }
-    _block = new HdfsMetaBlock();
-    _block.realPosition = _outputStream.getPos();
-    _block.logicalPosition = pos;
-    _currentPosition = pos;
-  }
-
-  public void close() throws IOException {
-    if (_block != null) {
-      _blocks.add(_block);
-    }
-    flushMetaBlocks();
-    _outputStream.close();
-  }
-
-  private void flushMetaBlocks() throws IOException {
-    for (HdfsMetaBlock block : _blocks) {
-      block.write(_outputStream);
-    }
-    _outputStream.writeInt(_blocks.size());
-    _outputStream.writeLong(length());
-    _outputStream.writeInt(VERSION);
-  }
-
-  @Override
-  public void writeByte(byte b) throws IOException {
-    _outputStream.write(b & 0xFF);
-    _block.length++;
-    _currentPosition++;
-    updateLength();
-  }
-
-  @Override
-  public void writeBytes(byte[] b, int offset, int length) throws IOException {
-    _outputStream.write(b, offset, length);
-    _block.length += length;
-    _currentPosition += length;
-    updateLength();
-  }
-
-  private void updateLength() {
-    if (_currentPosition > _length) {
-      _length = _currentPosition;
-    }
-  }
-
-  public long getPosition() {
-    return _currentPosition;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/a4601422/src/blur-store/src/main/java/org/apache/blur/store/hdfs/HdfsMetaBlock.java
----------------------------------------------------------------------
diff --git a/src/blur-store/src/main/java/org/apache/blur/store/hdfs/HdfsMetaBlock.java b/src/blur-store/src/main/java/org/apache/blur/store/hdfs/HdfsMetaBlock.java
deleted file mode 100644
index b939293..0000000
--- a/src/blur-store/src/main/java/org/apache/blur/store/hdfs/HdfsMetaBlock.java
+++ /dev/null
@@ -1,61 +0,0 @@
-package org.apache.blur.store.hdfs;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
-import org.apache.hadoop.io.Writable;
-
-class HdfsMetaBlock implements Writable {
-  long logicalPosition;
-  long realPosition;
-  long length;
-
-  @Override
-  public void readFields(DataInput in) throws IOException {
-    logicalPosition = in.readLong();
-    realPosition = in.readLong();
-    length = in.readLong();
-  }
-
-  @Override
-  public void write(DataOutput out) throws IOException {
-    out.writeLong(logicalPosition);
-    out.writeLong(realPosition);
-    out.writeLong(length);
-  }
-
-  boolean containsDataAt(long logicalPos) {
-    if (logicalPos >= logicalPosition && logicalPos < logicalPosition + length) {
-      return true;
-    }
-    return false;
-  }
-
-  long getRealPosition(long logicalPos) {
-    long offset = logicalPos - logicalPosition;
-    long pos = realPosition + offset;
-    return pos;
-  }
-
-  @Override
-  public String toString() {
-    return "HdfsMetaBlock [length=" + length + ", logicalPosition=" + logicalPosition + ", realPosition=" + realPosition + "]";
-  }
-}

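Taken together, the removed HdfsFileWriter, HdfsFileReader, and HdfsMetaBlock classes defined the layered-file format: data blocks first, then one 24-byte record (three longs) per meta block, then a 16-byte footer holding the block count (int), the logical length (long), and the version (int). A small sketch of the offset arithmetic the removed reader relied on; the class and method names are illustrative:

    // Footer layout used by the removed HdfsFileReader/HdfsFileWriter pair.
    // Offsets are computed backwards from the physical (HDFS) file length.
    final class LayeredFileLayout {
      static final int FOOTER_BYTES = 16;     // int block count + long length + int version
      static final int META_BLOCK_BYTES = 24; // 3 longs: logicalPosition, realPosition, length

      // Physical offset where the 16-byte footer starts.
      static long footerOffset(long hdfsLength) {
        return hdfsLength - FOOTER_BYTES;
      }

      // Physical offset where the meta-block table starts.
      static long metaBlocksOffset(long hdfsLength, int numberOfBlocks) {
        return hdfsLength - FOOTER_BYTES - (long) numberOfBlocks * META_BLOCK_BYTES;
      }
    }
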
http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/a4601422/src/blur-store/src/main/java/org/apache/blur/store/hdfs/NullIndexOutput.java
----------------------------------------------------------------------
diff --git a/src/blur-store/src/main/java/org/apache/blur/store/hdfs/NullIndexOutput.java b/src/blur-store/src/main/java/org/apache/blur/store/hdfs/NullIndexOutput.java
deleted file mode 100644
index 1a03c10..0000000
--- a/src/blur-store/src/main/java/org/apache/blur/store/hdfs/NullIndexOutput.java
+++ /dev/null
@@ -1,70 +0,0 @@
-package org.apache.blur.store.hdfs;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-import java.io.IOException;
-
-import org.apache.lucene.store.IndexOutput;
-
-public class NullIndexOutput extends IndexOutput {
-
-  private long _pos;
-  private long _length;
-
-  @Override
-  public void close() throws IOException {
-
-  }
-
-  @Override
-  public void flush() throws IOException {
-
-  }
-
-  @Override
-  public long getFilePointer() {
-    return _pos;
-  }
-
-  @Override
-  public long length() throws IOException {
-    return _length;
-  }
-
-  @Override
-  public void seek(long pos) throws IOException {
-    _pos = pos;
-  }
-
-  @Override
-  public void writeByte(byte b) throws IOException {
-    _pos++;
-  }
-
-  @Override
-  public void writeBytes(byte[] b, int offset, int length) throws IOException {
-    _pos += length;
-    updateLength();
-  }
-
-  private void updateLength() {
-    if (_pos > _length) {
-      _length = _pos;
-    }
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/a4601422/src/blur-store/src/main/java/org/apache/blur/store/lock/BlurLockFactory.java
----------------------------------------------------------------------
diff --git a/src/blur-store/src/main/java/org/apache/blur/store/lock/BlurLockFactory.java b/src/blur-store/src/main/java/org/apache/blur/store/lock/BlurLockFactory.java
deleted file mode 100644
index c79608c..0000000
--- a/src/blur-store/src/main/java/org/apache/blur/store/lock/BlurLockFactory.java
+++ /dev/null
@@ -1,102 +0,0 @@
-package org.apache.blur.store.lock;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-import java.io.IOException;
-import java.util.Arrays;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.lucene.store.Lock;
-import org.apache.lucene.store.LockFactory;
-
-public class BlurLockFactory extends LockFactory {
-
-  private final Configuration _configuration;
-  private final FileSystem _fileSystem;
-  private final String _baseLockKey;
-  private byte[] _lockKey;
-  private final Path _dir;
-
-  public BlurLockFactory(Configuration configuration, Path dir, String host, int pid) throws IOException {
-    _configuration = configuration;
-    _dir = dir;
-    _fileSystem = _dir.getFileSystem(_configuration);
-    _baseLockKey = host + "/" + pid;
-  }
-
-  @Override
-  public Lock makeLock(String lockName) {
-    final Path lockPath = new Path(_dir, lockName);
-    return new Lock() {
-      private boolean _set;
-
-      @Override
-      public boolean obtain() throws IOException {
-        if (_set) {
-          throw new IOException("Lock for [" + _baseLockKey + "] can only be set once.");
-        }
-        try {
-          _lockKey = (_baseLockKey + "/" + System.currentTimeMillis()).getBytes();
-          FSDataOutputStream outputStream = _fileSystem.create(lockPath, true);
-          outputStream.write(_lockKey);
-          outputStream.close();
-        } finally {
-          _set = true;
-        }
-        return true;
-      }
-
-      @Override
-      public void release() throws IOException {
-        _fileSystem.delete(lockPath, false);
-      }
-
-      @Override
-      public boolean isLocked() throws IOException {
-        if (!_set) {
-          return false;
-        }
-        if (!_fileSystem.exists(lockPath)) {
-          return false;
-        }
-        FileStatus fileStatus = _fileSystem.getFileStatus(lockPath);
-        long len = fileStatus.getLen();
-        if (len != _lockKey.length) {
-          return false;
-        }
-        byte[] buf = new byte[_lockKey.length];
-        FSDataInputStream inputStream = _fileSystem.open(lockPath);
-        inputStream.readFully(buf);
-        inputStream.close();
-        if (Arrays.equals(_lockKey, buf)) {
-          return true;
-        }
-        return false;
-      }
-    };
-  }
-
-  @Override
-  public void clearLock(String lockName) throws IOException {
-    _fileSystem.delete(new Path(_dir, lockName), false);
-  }
-}

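The removed BlurLockFactory identified a lock holder by writing host, pid, and a timestamp into the lock file, and isLocked() verified ownership by re-reading the file and comparing bytes. A hedged usage sketch of how the factory was driven; the path, host, and pid are illustrative assumptions:

    import org.apache.blur.store.lock.BlurLockFactory;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.lucene.store.Lock;

    public class BlurLockFactoryUsage {
      public static void main(String[] args) throws Exception {
        // Host and pid are assumed example values; callers supplied their own identity.
        Path dir = new Path("hdfs://namenode:8020/blur/tables/table1");
        BlurLockFactory factory = new BlurLockFactory(new Configuration(), dir, "host1", 12345);
        Lock lock = factory.makeLock("write.lock");
        if (lock.obtain()) {   // writes host/pid/timestamp into the lock file
          try {
            // ... work guarded by the lock ...
          } finally {
            lock.release();    // deletes the lock file
          }
        }
      }
    }
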
http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/a4601422/src/blur-store/src/main/java/org/apache/lucene/index/WarmUpByFieldBounds.java
----------------------------------------------------------------------
diff --git a/src/blur-store/src/main/java/org/apache/lucene/index/WarmUpByFieldBounds.java b/src/blur-store/src/main/java/org/apache/lucene/index/WarmUpByFieldBounds.java
deleted file mode 100644
index c159fbc..0000000
--- a/src/blur-store/src/main/java/org/apache/lucene/index/WarmUpByFieldBounds.java
+++ /dev/null
@@ -1,217 +0,0 @@
-package org.apache.lucene.index;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.List;
-import java.util.UUID;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import org.apache.blur.log.Log;
-import org.apache.blur.log.LogFactory;
-import org.apache.blur.lucene.LuceneConstant;
-import org.apache.lucene.analysis.KeywordAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.Field.Index;
-import org.apache.lucene.document.Field.Store;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.IndexInput;
-import org.apache.lucene.store.LockObtainFailedException;
-import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.util.ReaderUtil;
-
-
-public class WarmUpByFieldBounds {
-
-  public static void main(String[] args) throws CorruptIndexException, IOException {
-    Directory dir = getDir();
-    IndexReader reader = IndexReader.open(dir);
-    AtomicBoolean isClosed = new AtomicBoolean(false);
-    WarmUpByFieldBounds warmUpByFieldBounds = new WarmUpByFieldBounds();
-    WarmUpByFieldBoundsStatus status = new WarmUpByFieldBoundsStatus() {
-      @Override
-      public void complete(String name, Term start, Term end, long startPosition, long endPosition, long totalBytesRead, long nanoTime, AtomicBoolean isClosed) {
-        // System.out.println(name + " " + start + " " + end + " " +
-        // startPosition + " " + endPosition + " " + totalBytesRead + " " +
-        // nanoTime + " " + isClosed);
-
-        double bytesPerNano = totalBytesRead / (double) nanoTime;
-        double mBytesPerNano = bytesPerNano / 1024 / 1024;
-        double mBytesPerSecond = mBytesPerNano * 1000000000.0;
-        if (totalBytesRead > 0) {
-          System.out.println("Precached field [" + start.field() + "] in file [" + name + "], " + totalBytesRead + " bytes cached at [" + mBytesPerSecond + " MB/s]");
-        }
-      }
-    };
-    warmUpByFieldBounds.warmUpByField(isClosed, new Term("f1"), reader, status);
-    warmUpByFieldBounds.warmUpByField(isClosed, new Term("f0"), reader, status);
-    warmUpByFieldBounds.warmUpByField(isClosed, new Term("f9"), reader, status);
-    warmUpByFieldBounds.warmUpByField(isClosed, new Term("f"), reader, status);
-  }
-
-  private static Directory getDir() throws CorruptIndexException, LockObtainFailedException, IOException {
-    RAMDirectory dir = new RAMDirectory();
-    IndexWriterConfig conf = new IndexWriterConfig(LuceneConstant.LUCENE_VERSION, new KeywordAnalyzer());
-    TieredMergePolicy mergePolicy = (TieredMergePolicy) conf.getMergePolicy();
-    mergePolicy.setUseCompoundFile(false);
-    IndexWriter writer = new IndexWriter(dir, conf);
-    for (int i = 0; i < 100000; i++) {
-      writer.addDocument(getDoc());
-    }
-    writer.close();
-    return dir;
-  }
-
-  private static Document getDoc() {
-    Document document = new Document();
-    for (int i = 0; i < 10; i++) {
-      document.add(new Field("f" + i, UUID.randomUUID().toString(), Store.YES, Index.ANALYZED));
-    }
-    return document;
-  }
-
-  private static final Log LOG = LogFactory.getLog(WarmUpByFieldBounds.class);
-
-  public void warmUpByField(AtomicBoolean isClosed, Term term, IndexReader reader, WarmUpByFieldBoundsStatus status) throws IOException {
-    FieldInfos fieldInfos = ReaderUtil.getMergedFieldInfos(reader);
-    Collection<String> fieldNames = new HashSet<String>();
-    for (FieldInfo info : fieldInfos) {
-      if (info.isIndexed) {
-        fieldNames.add(info.name);
-      }
-    }
-    List<String> fields = new ArrayList<String>(fieldNames);
-    Collections.sort(fields);
-    int index = fields.indexOf(term.field);
-    if (index < fields.size() - 1) {
-      warmUpByTermRange(isClosed, term, new Term(fields.get(index + 1)), reader, status);
-    } else {
-      warmUpByTermRange(isClosed, term, null, reader, status);
-    }
-  }
-
-  public void warmUpByTermRange(AtomicBoolean isClosed, Term start, Term end, IndexReader reader, WarmUpByFieldBoundsStatus status) throws IOException {
-    if (reader instanceof SegmentReader) {
-      warmUpByTermRangeSegmentReader(isClosed, start, end, (SegmentReader) reader, status);
-      return;
-    }
-    IndexReader[] subReaders = reader.getSequentialSubReaders();
-    if (subReaders == null) {
-      throw new RuntimeException("Reader is not supported [" + reader.getClass() + "] [" + reader + "]");
-    }
-    for (int i = 0; i < subReaders.length; i++) {
-      warmUpByTermRange(isClosed, start, end, subReaders[i], status);
-    }
-  }
-
-  private static void warmUpByTermRangeSegmentReader(AtomicBoolean isClosed, Term start, Term end, SegmentReader reader, WarmUpByFieldBoundsStatus status) throws IOException {
-    SegmentCoreReaders core = reader.core;
-    TermInfosReader termsReader = core.getTermsReader();
-    Directory directory = reader.directory();
-    String segmentName = reader.getSegmentName();
-    IndexInput tis = null;
-    IndexInput frq = null;
-    IndexInput prx = null;
-    try {
-      String nameTis = segmentName + ".tis";
-      String nameFrq = segmentName + ".frq";
-      String namePrx = segmentName + ".prx";
-      tis = directory.openInput(nameTis);
-      frq = directory.openInput(nameFrq);
-      prx = directory.openInput(namePrx);
-
-      long startTermPointer = 0;
-      long endTermPointer = tis.length();
-      long startFreqPointer = 0;
-      long endFreqPointer = frq.length();
-      long startProxPointer = 0;
-      long endProxPointer = prx.length();
-      if (start != null) {
-        Term realStartTerm = getFirstTerm(start, reader);
-        if (realStartTerm == null) {
-          return;
-        }
-        TermInfo startTermInfo = termsReader.get(realStartTerm);
-        startTermPointer = termsReader.getPosition(realStartTerm);
-        startFreqPointer = startTermInfo.freqPointer;
-        startProxPointer = startTermInfo.proxPointer;
-      }
-
-      if (end != null) {
-        Term realEndTerm = getFirstTerm(end, reader);
-        if (realEndTerm == null) {
-          return;
-        }
-        TermInfo endTermInfo = termsReader.get(realEndTerm);
-        endTermPointer = termsReader.getPosition(realEndTerm);
-        endFreqPointer = endTermInfo.freqPointer;
-        endProxPointer = endTermInfo.proxPointer;
-      }
-      readFile(isClosed, tis, startTermPointer, endTermPointer, status, start, end, nameTis);
-      readFile(isClosed, frq, startFreqPointer, endFreqPointer, status, start, end, nameFrq);
-      readFile(isClosed, prx, startProxPointer, endProxPointer, status, start, end, namePrx);
-    } finally {
-      close(tis);
-      close(frq);
-      close(prx);
-    }
-  }
-
-  private static Term getFirstTerm(Term t, SegmentReader reader) throws IOException {
-    TermEnum terms = reader.terms(t);
-    try {
-      if (terms.next()) {
-        return terms.term();
-      }
-      return null;
-    } finally {
-      terms.close();
-    }
-  }
-
-  private static void close(IndexInput input) {
-    try {
-      if (input != null) {
-        input.close();
-      }
-    } catch (IOException e) {
-      LOG.error("Error while trying to close file [" + input + "]", e);
-    }
-  }
-
-  private static void readFile(AtomicBoolean isClosed, IndexInput input, long startTermPointer, long endTermPointer, WarmUpByFieldBoundsStatus status, Term start, Term end,
-      String name) throws IOException {
-    byte[] buffer = new byte[4096];
-    long position = startTermPointer;
-    input.seek(position);
-    long total = 0;
-    long s = System.nanoTime();
-    while (position < endTermPointer && !isClosed.get()) {
-      int length = (int) Math.min(buffer.length, endTermPointer - position);
-      input.readBytes(buffer, 0, length);
-      position += length;
-      total += length;
-    }
-    long e = System.nanoTime();
-    status.complete(name, start, end, startTermPointer, endTermPointer, total, e - s, isClosed);
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/a4601422/src/blur-store/src/main/java/org/apache/lucene/index/WarmUpByFieldBoundsStatus.java
----------------------------------------------------------------------
diff --git a/src/blur-store/src/main/java/org/apache/lucene/index/WarmUpByFieldBoundsStatus.java b/src/blur-store/src/main/java/org/apache/lucene/index/WarmUpByFieldBoundsStatus.java
deleted file mode 100644
index d327796..0000000
--- a/src/blur-store/src/main/java/org/apache/lucene/index/WarmUpByFieldBoundsStatus.java
+++ /dev/null
@@ -1,25 +0,0 @@
-package org.apache.lucene.index;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-import java.util.concurrent.atomic.AtomicBoolean;
-
-public interface WarmUpByFieldBoundsStatus {
-
-  void complete(String name, Term start, Term end, long startPosition, long endPosition, long totalBytesRead, long nanoTime, AtomicBoolean isClosed);
-
-}
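
The removed interface above is a plain progress callback, invoked once per
warmed file. A hypothetical implementation that only reports throughput might
look like the following (LoggingWarmUpStatus is an illustrative name; the
interface is the one deleted in this commit, so this compiles only against
the old tree):

    import java.util.concurrent.atomic.AtomicBoolean;

    import org.apache.lucene.index.Term;
    import org.apache.lucene.index.WarmUpByFieldBoundsStatus;

    public class LoggingWarmUpStatus implements WarmUpByFieldBoundsStatus {

      @Override
      public void complete(String name, Term start, Term end, long startPosition,
          long endPosition, long totalBytesRead, long nanoTime, AtomicBoolean isClosed) {
        // nanoTime is the elapsed read time reported by the warmer.
        double seconds = nanoTime / 1e9;
        double mbPerSec = seconds == 0 ? 0 : (totalBytesRead / (1024.0 * 1024.0)) / seconds;
        System.out.println("Warmed [" + name + "] read [" + totalBytesRead
            + "] bytes at [" + mbPerSec + " MB/s] closed [" + isClosed.get() + "]");
      }
    }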

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/a4601422/src/blur-store/src/test/java/org/apache/blur/store/BenchmarkDirectory.java
----------------------------------------------------------------------
diff --git a/src/blur-store/src/test/java/org/apache/blur/store/BenchmarkDirectory.java b/src/blur-store/src/test/java/org/apache/blur/store/BenchmarkDirectory.java
deleted file mode 100644
index 9bc9d3c..0000000
--- a/src/blur-store/src/test/java/org/apache/blur/store/BenchmarkDirectory.java
+++ /dev/null
@@ -1,148 +0,0 @@
-package org.apache.blur.store;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-import java.io.IOException;
-import java.lang.management.ManagementFactory;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Random;
-import java.util.UUID;
-
-import org.apache.blur.metrics.BlurMetrics;
-import org.apache.blur.store.blockcache.BlockCache;
-import org.apache.blur.store.blockcache.BlockDirectory;
-import org.apache.blur.store.blockcache.BlockDirectoryCache;
-import org.apache.blur.store.hdfs.HdfsDirectory;
-import org.apache.blur.store.lock.BlurLockFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.lucene.analysis.standard.StandardAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.Field.Index;
-import org.apache.lucene.document.Field.Store;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.IndexWriterConfig;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.index.TermEnum;
-import org.apache.lucene.index.TieredMergePolicy;
-import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.TermQuery;
-import org.apache.lucene.search.TopDocs;
-import org.apache.lucene.search.WildcardQuery;
-
-
-import static org.apache.blur.lucene.LuceneConstant.LUCENE_VERSION;
-
-public class BenchmarkDirectory {
-
-  public static void main(String[] args) throws IOException {
-    int blockSize = BlockDirectory.BLOCK_SIZE;
-    long totalMemory = BlockCache._128M * 2;
-    int slabSize = (int) (totalMemory / 2);
-
-    BlockCache blockCache = new BlockCache(new BlurMetrics(new Configuration()), true, totalMemory, slabSize, blockSize);
-    BlurMetrics metrics = new BlurMetrics(new Configuration());
-    BlockDirectoryCache cache = new BlockDirectoryCache(blockCache, metrics);
-
-    Configuration configuration = new Configuration();
-    Path p = new Path("hdfs://localhost:9000/bench");
-    BlurLockFactory factory = new BlurLockFactory(configuration, p, "localhost", 0);
-
-    FileSystem fs = FileSystem.get(p.toUri(), configuration);
-    fs.delete(p, true);
-
-    final HdfsDirectory dir = new HdfsDirectory(p);
-    dir.setLockFactory(factory);
-
-    BlockDirectory directory = new BlockDirectory("test", dir, cache);
-
-    while (true) {
-      long s, e;
-
-      s = System.currentTimeMillis();
-      IndexWriterConfig conf = new IndexWriterConfig(LUCENE_VERSION, new StandardAnalyzer(LUCENE_VERSION));
-      TieredMergePolicy mergePolicy = (TieredMergePolicy) conf.getMergePolicy();
-      mergePolicy.setUseCompoundFile(false);
-      IndexWriter writer = new IndexWriter(directory, conf);
-      for (int i = 0; i < 1000000; i++) {
-        writer.addDocument(getDoc());
-      }
-      writer.close();
-      e = System.currentTimeMillis();
-      System.out.println("Indexing " + (e - s));
-
-      IndexReader reader = IndexReader.open(directory);
-      System.out.println("Docs " + reader.numDocs());
-      TermEnum terms = reader.terms();
-      List<Term> sample = new ArrayList<Term>();
-      int limit = 1000;
-      Random random = new Random();
-      SAMPLE: while (terms.next()) {
-        if (sample.size() < limit) {
-          if (random.nextInt() % 7 == 0) {
-            sample.add(terms.term());
-          }
-        } else {
-          break SAMPLE;
-        }
-      }
-      terms.close();
-
-      System.out.println("Sampling complete [" + sample.size() + "]");
-      IndexSearcher searcher = new IndexSearcher(reader);
-      long total = 0;
-      long time = 0;
-      int search = 10;
-      for (int i = 0; i < search; i++) {
-        s = System.currentTimeMillis();
-        TopDocs topDocs = searcher.search(new TermQuery(sample.get(random.nextInt(sample.size()))), 10);
-        total += topDocs.totalHits;
-        e = System.currentTimeMillis();
-        time += (e - s);
-      }
-      System.out.println("Searching " + time + " " + (time / (double) search) + " " + total);
-      for (int i = 0; i < 10; i++) {
-        s = System.currentTimeMillis();
-        TopDocs topDocs = searcher.search(new WildcardQuery(new Term("name", "fff*0*")), 10);
-        e = System.currentTimeMillis();
-        System.out.println(topDocs.totalHits + " " + (e - s));
-      }
-      reader.close();
-    }
-  }
-
-  private static Document getDoc() {
-    Document document = new Document();
-    document.add(new Field("name", UUID.randomUUID().toString(), Store.YES, Index.ANALYZED_NO_NORMS));
-    return document;
-  }
-
-  public static int getNumberOfSlabs(float heapPercentage, int numberOfBlocksPerSlab, int blockSize) {
-    long max = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax();
-    long targetBytes = (long) (max * heapPercentage);
-    int slabSize = numberOfBlocksPerSlab * blockSize;
-    int slabs = (int) (targetBytes / slabSize);
-    if (slabs == 0) {
-      throw new RuntimeException("Minimum heap size is 512m!");
-    }
-    return slabs;
-  }
-}
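
The getNumberOfSlabs helper at the bottom of the benchmark divides a
heap-percentage byte budget by the slab size. A worked example with assumed
numbers (none of these figures come from the commit): an 8 KiB block and
16384 blocks per slab give a 128 MiB slab, so a 50% budget of a 1 GiB heap
(512 MiB) yields 4 slabs.

    public class SlabSizingExample {
      public static void main(String[] args) {
        int blockSize = 8192;                          // 8 KiB blocks (assumed)
        int blocksPerSlab = 16384;                     // blocks per slab (assumed)
        int slabSize = blocksPerSlab * blockSize;      // 128 MiB per slab
        long targetBytes = (long) ((1L << 30) * 0.5f); // 50% of a 1 GiB heap
        System.out.println(targetBytes / slabSize);    // prints 4
      }
    }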

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/a4601422/src/blur-store/src/test/java/org/apache/blur/store/BenchmarkDirectoryNrt.java
----------------------------------------------------------------------
diff --git a/src/blur-store/src/test/java/org/apache/blur/store/BenchmarkDirectoryNrt.java b/src/blur-store/src/test/java/org/apache/blur/store/BenchmarkDirectoryNrt.java
deleted file mode 100644
index 4f1ccdd..0000000
--- a/src/blur-store/src/test/java/org/apache/blur/store/BenchmarkDirectoryNrt.java
+++ /dev/null
@@ -1,160 +0,0 @@
-package org.apache.blur.store;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-import java.io.IOException;
-import java.lang.management.ManagementFactory;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.UUID;
-import java.util.concurrent.ArrayBlockingQueue;
-import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import org.apache.blur.metrics.BlurMetrics;
-import org.apache.blur.store.blockcache.BlockCache;
-import org.apache.blur.store.blockcache.BlockDirectory;
-import org.apache.blur.store.blockcache.BlockDirectoryCache;
-import org.apache.blur.store.hdfs.HdfsDirectory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.lucene.analysis.standard.StandardAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.Field.Index;
-import org.apache.lucene.document.Field.Store;
-import org.apache.lucene.index.CorruptIndexException;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.IndexWriterConfig;
-import org.apache.lucene.index.TieredMergePolicy;
-import org.apache.lucene.store.NoLockFactory;
-
-
-import static org.apache.blur.lucene.LuceneConstant.LUCENE_VERSION;
-
-public class BenchmarkDirectoryNrt {
-
-  public static void main(String[] args) throws IOException, InterruptedException {
-    int blockSize = BlockDirectory.BLOCK_SIZE;
-    long totalMemory = BlockCache._128M * 2;
-    int slabSize = (int) (totalMemory / 2);
-
-    BlockCache blockCache = new BlockCache(new BlurMetrics(new Configuration()), true, totalMemory, slabSize, blockSize);
-    BlurMetrics metrics = new BlurMetrics(new Configuration());
-    BlockDirectoryCache cache = new BlockDirectoryCache(blockCache, metrics);
-
-    Path p = new Path("hdfs://localhost:9000/bench");
-    FileSystem fs = FileSystem.get(p.toUri(), new Configuration());
-    fs.delete(p, true);
-
-    final HdfsDirectory dir = new HdfsDirectory(p);
-    dir.setLockFactory(NoLockFactory.getNoLockFactory());
-
-    BlockDirectory directory = new BlockDirectory("test", dir, cache);
-
-    while (true) {
-      IndexWriterConfig conf = new IndexWriterConfig(LUCENE_VERSION, new StandardAnalyzer(LUCENE_VERSION));
-      TieredMergePolicy mergePolicy = (TieredMergePolicy) conf.getMergePolicy();
-      mergePolicy.setUseCompoundFile(false);
-      int count = 0;
-      int max = 10000;
-      long s = System.currentTimeMillis();
-      IndexWriter writer = new IndexWriter(directory, conf);
-      long as = System.currentTimeMillis();
-      BlockingQueue<Collection<Document>> queue = new ArrayBlockingQueue<Collection<Document>>(1024);
-      Indexer indexer = new Indexer(queue, writer);
-      new Thread(indexer).start();
-      for (int i = 0; i < 1000000; i++) {
-        if (count >= max) {
-          double aseconds = (System.currentTimeMillis() - as) / 1000.0;
-          double arate = i / aseconds;
-          double seconds = (System.currentTimeMillis() - s) / 1000.0;
-          double rate = count / seconds;
-          System.out.println("Total [" + i + "] Rate [" + rate + "] AvgRate [" + arate + "] Doc count [" + indexer.getReader().numDocs() + "]");
-          count = 0;
-          s = System.currentTimeMillis();
-        }
-        queue.put(Arrays.asList(getDoc()));
-        count++;
-      }
-      writer.close();
-    }
-  }
-
-  private static class Indexer implements Runnable {
-
-    private BlockingQueue<Collection<Document>> _queue;
-    private AtomicBoolean _running = new AtomicBoolean(true);
-    private IndexWriter _writer;
-    private IndexReader _reader;
-
-    public Indexer(BlockingQueue<Collection<Document>> queue, IndexWriter writer) throws CorruptIndexException, IOException {
-      _queue = queue;
-      _writer = writer;
-      _reader = IndexReader.open(_writer, true);
-    }
-
-    public IndexReader getReader() {
-      return _reader;
-    }
-
-    @Override
-    public void run() {
-      long cycleTime = 50000000;
-      long start = System.nanoTime();
-      while (_running.get()) {
-        try {
-          Collection<Document> docs = _queue.take();
-          _writer.addDocuments(docs);
-          if (start + cycleTime < System.nanoTime()) {
-            IndexReader newReader = IndexReader.open(_writer, true);
-            _reader.close();
-            _reader = newReader;
-            start = System.nanoTime();
-          }
-        } catch (InterruptedException e) {
-          return;
-        } catch (CorruptIndexException e) {
-          e.printStackTrace();
-          return;
-        } catch (IOException e) {
-          e.printStackTrace();
-          return;
-        }
-      }
-    }
-  }
-
-  private static Document getDoc() {
-    Document document = new Document();
-    document.add(new Field("name", UUID.randomUUID().toString(), Store.YES, Index.ANALYZED_NO_NORMS));
-    return document;
-  }
-
-  public static int getNumberOfSlabs(float heapPercentage, int numberOfBlocksPerSlab, int blockSize) {
-    long max = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax();
-    long targetBytes = (long) (max * heapPercentage);
-    int slabSize = numberOfBlocksPerSlab * blockSize;
-    int slabs = (int) (targetBytes / slabSize);
-    if (slabs == 0) {
-      throw new RuntimeException("Minimum heap size is 512m!");
-    }
-    return slabs;
-  }
-}
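
The Indexer thread above demonstrates the near-real-time pattern: rather than
closing the writer to make new documents visible, it periodically reopens a
reader directly from the live IndexWriter. A minimal sketch of just the
reopen step (Lucene 3.x API, as used by the deleted benchmark; NrtReopener is
an illustrative name):

    import java.io.IOException;

    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.IndexWriter;

    public class NrtReopener {

      // Returns a fresh NRT reader if intervalNanos have elapsed since
      // lastReopenNanos, otherwise returns the current reader unchanged.
      // The caller is responsible for resetting its timestamp on a swap.
      public static IndexReader maybeReopen(IndexWriter writer, IndexReader current,
          long lastReopenNanos, long intervalNanos) throws IOException {
        if (System.nanoTime() - lastReopenNanos < intervalNanos) {
          return current;
        }
        IndexReader fresh = IndexReader.open(writer, true); // applyAllDeletes
        current.close();
        return fresh;
      }
    }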

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/a4601422/src/blur-store/src/test/java/org/apache/blur/store/HdfsDirectoryTest.java
----------------------------------------------------------------------
diff --git a/src/blur-store/src/test/java/org/apache/blur/store/HdfsDirectoryTest.java b/src/blur-store/src/test/java/org/apache/blur/store/HdfsDirectoryTest.java
index ef9e4cb..fba2a42 100644
--- a/src/blur-store/src/test/java/org/apache/blur/store/HdfsDirectoryTest.java
+++ b/src/blur-store/src/test/java/org/apache/blur/store/HdfsDirectoryTest.java
@@ -28,17 +28,20 @@ import java.util.HashSet;
 import java.util.Random;
 import java.util.Set;
 
+import org.apache.blur.store.buffer.BufferStore;
 import org.apache.blur.store.hdfs.HdfsDirectory;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.store.IndexOutput;
 import org.apache.lucene.store.RAMDirectory;
 import org.junit.Before;
 import org.junit.Test;
 
-
 public class HdfsDirectoryTest {
+  private static final File TMPDIR = new File(System.getProperty("blur.tmp.dir", "/tmp"));
 
   private static final int MAX_NUMBER_OF_WRITES = 10000;
   private static final int MIN_FILE_SIZE = 100;
@@ -53,25 +56,26 @@ public class HdfsDirectoryTest {
 
   @Before
   public void setUp() throws IOException {
-    file = new File("./tmp");
+    BufferStore.init(128, 128);
+    file = new File(TMPDIR, "hdfsdirectorytest");
     rm(file);
     URI uri = new File(file, "hdfs").toURI();
     Path hdfsDirPath = new Path(uri.toString());
-    directory = new HdfsDirectory(hdfsDirPath);
+    Configuration conf = new Configuration();
+    directory = new HdfsDirectory(conf, hdfsDirPath);
     seed = new Random().nextLong();
-    // seed = 7392202912208392081L;
     random = new Random(seed);
   }
 
   @Test
   public void testWritingAndReadingAFile() throws IOException {
 
-    IndexOutput output = directory.createOutput("testing.test");
+    IndexOutput output = directory.createOutput("testing.test", IOContext.DEFAULT);
     output.writeInt(12345);
     output.flush();
     output.close();
 
-    IndexInput input = directory.openInput("testing.test");
+    IndexInput input = directory.openInput("testing.test", IOContext.DEFAULT);
     assertEquals(12345, input.readInt());
     input.close();
 
@@ -81,7 +85,7 @@ public class HdfsDirectoryTest {
 
     assertEquals(4, directory.fileLength("testing.test"));
 
-    IndexInput input1 = directory.openInput("testing.test");
+    IndexInput input1 = directory.openInput("testing.test", IOContext.DEFAULT);
 
     IndexInput input2 = (IndexInput) input1.clone();
     assertEquals(12345, input2.readInt());
@@ -109,7 +113,7 @@ public class HdfsDirectoryTest {
   }
 
   private void testEof(String name, Directory directory, long length) throws IOException {
-    IndexInput input = directory.openInput(name);
+    IndexInput input = directory.openInput(name, IOContext.DEFAULT);
     input.seek(length);
     try {
       input.readByte();
@@ -119,7 +123,7 @@ public class HdfsDirectoryTest {
   }
 
   @Test
-  public void testRandomAccessWrites() throws IOException {
+  public void testWrites() throws IOException {
     int i = 0;
     try {
       Set<String> names = new HashSet<String>();
@@ -140,9 +144,8 @@ public class HdfsDirectoryTest {
 
   private void assertInputsEquals(String name, Directory fsDir, HdfsDirectory hdfs) throws IOException {
     int reads = random.nextInt(MAX_NUMBER_OF_READS);
-    int buffer = random.nextInt(MAX_BUFFER_SIZE - MIN_BUFFER_SIZE) + MIN_BUFFER_SIZE;
-    IndexInput fsInput = fsDir.openInput(name, buffer);
-    IndexInput hdfsInput = hdfs.openInput(name, buffer);
+    IndexInput fsInput = fsDir.openInput(name, IOContext.DEFAULT);
+    IndexInput hdfsInput = hdfs.openInput(name, IOContext.DEFAULT);
     assertEquals(fsInput.length(), hdfsInput.length());
     int fileLength = (int) fsInput.length();
     for (int i = 0; i < reads; i++) {
@@ -168,9 +171,9 @@ public class HdfsDirectoryTest {
   private void createFile(String name, Directory fsDir, HdfsDirectory hdfs) throws IOException {
     int writes = random.nextInt(MAX_NUMBER_OF_WRITES);
     int fileLength = random.nextInt(MAX_FILE_SIZE - MIN_FILE_SIZE) + MIN_FILE_SIZE;
-    IndexOutput fsOutput = fsDir.createOutput(name);
+    IndexOutput fsOutput = fsDir.createOutput(name, IOContext.DEFAULT);
     fsOutput.setLength(fileLength);
-    IndexOutput hdfsOutput = hdfs.createOutput(name);
+    IndexOutput hdfsOutput = hdfs.createOutput(name, IOContext.DEFAULT);
     hdfsOutput.setLength(fileLength);
     for (int i = 0; i < writes; i++) {
       byte[] buf = new byte[random.nextInt(Math.min(MAX_BUFFER_SIZE - MIN_BUFFER_SIZE, fileLength)) + MIN_BUFFER_SIZE];
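
The test changes above track the Lucene 4.x Directory API, where every
createOutput and openInput call now carries an IOContext hint instead of a
caller-supplied buffer size. A self-contained before/after sketch using a
RAMDirectory (the file name is illustrative):

    import java.io.IOException;

    import org.apache.lucene.store.IOContext;
    import org.apache.lucene.store.IndexInput;
    import org.apache.lucene.store.IndexOutput;
    import org.apache.lucene.store.RAMDirectory;

    public class IOContextExample {
      public static void main(String[] args) throws IOException {
        RAMDirectory dir = new RAMDirectory();
        // Lucene 3.x: dir.createOutput("f.bin") / dir.openInput("f.bin", bufferSize)
        // Lucene 4.x: both calls take an IOContext.
        IndexOutput out = dir.createOutput("f.bin", IOContext.DEFAULT);
        out.writeInt(42);
        out.close();
        IndexInput in = dir.openInput("f.bin", IOContext.DEFAULT);
        System.out.println(in.readInt()); // 42
        in.close();
        dir.close();
      }
    }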

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/a4601422/src/blur-store/src/test/java/org/apache/blur/store/UsingHdfsDir.java
----------------------------------------------------------------------
diff --git a/src/blur-store/src/test/java/org/apache/blur/store/UsingHdfsDir.java b/src/blur-store/src/test/java/org/apache/blur/store/UsingHdfsDir.java
deleted file mode 100644
index 302a7a1..0000000
--- a/src/blur-store/src/test/java/org/apache/blur/store/UsingHdfsDir.java
+++ /dev/null
@@ -1,112 +0,0 @@
-package org.apache.blur.store;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-import static org.apache.blur.lucene.LuceneConstant.LUCENE_VERSION;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.List;
-import java.util.UUID;
-
-import org.apache.blur.store.hdfs.HdfsDirectory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.lucene.analysis.standard.StandardAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.Field.Index;
-import org.apache.lucene.document.Field.Store;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.IndexWriterConfig;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.index.TermEnum;
-import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.TermQuery;
-import org.apache.lucene.search.TopDocs;
-import org.apache.lucene.store.NoLockFactory;
-
-
-public class UsingHdfsDir {
-
-  public static void main(String[] args) throws IOException {
-
-    // FileSystem fs = FileSystem.getLocal(new Configuration());
-    // Path p = new Path("file:///tmp/testdir");
-
-    Path p = new Path("hdfs://localhost:9000/test-dir");
-    FileSystem fs = FileSystem.get(p.toUri(), new Configuration());
-    fs.delete(p, true);
-
-    final HdfsDirectory directory = new HdfsDirectory(p);
-    directory.setLockFactory(NoLockFactory.getNoLockFactory());
-
-    IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(LUCENE_VERSION, new StandardAnalyzer(LUCENE_VERSION)));
-    for (int i = 0; i < 100000; i++) {
-      writer.addDocument(getDoc());
-    }
-    writer.close();
-
-    IndexReader reader = IndexReader.open(directory);
-    TermEnum terms = reader.terms();
-    while (terms.next()) {
-      System.out.println(terms.term());
-    }
-    terms.close();
-
-    IndexSearcher searcher = new IndexSearcher(reader);
-    TopDocs topDocs = searcher.search(new TermQuery(new Term("name", "ffff")), 10);
-    System.out.println(topDocs.totalHits);
-
-    reader.close();
-
-    List<String> files = new ArrayList<String>(Arrays.asList(directory.listAll()));
-    Collections.sort(files, new Comparator<String>() {
-      @Override
-      public int compare(String o1, String o2) {
-        try {
-          long fileLength1 = directory.fileLength(o1);
-          long fileLength2 = directory.fileLength(o2);
-          if (fileLength1 == fileLength2) {
-            return o1.compareTo(o2);
-          }
-          return (int) (fileLength2 - fileLength1);
-        } catch (IOException e) {
-          throw new RuntimeException(e);
-        }
-      }
-    });
-
-    for (String file : files) {
-      System.out.println(file + " " + directory.fileLength(file));
-    }
-
-    directory.close();
-  }
-
-  private static Document getDoc() {
-    Document document = new Document();
-    document.add(new Field("name", UUID.randomUUID().toString(), Store.YES, Index.ANALYZED_NO_NORMS));
-    return document;
-  }
-
-}
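
One nit in the removed example: its comparator casts a long difference to
int, which can misorder files whose lengths differ by more than
Integer.MAX_VALUE bytes. An overflow-safe version of the same
descending-by-length comparator (ByLengthDescending is an illustrative name):

    import java.io.IOException;
    import java.util.Comparator;

    import org.apache.lucene.store.Directory;

    public class ByLengthDescending implements Comparator<String> {

      private final Directory directory;

      public ByLengthDescending(Directory directory) {
        this.directory = directory;
      }

      @Override
      public int compare(String o1, String o2) {
        try {
          long length1 = directory.fileLength(o1);
          long length2 = directory.fileLength(o2);
          if (length1 == length2) {
            return o1.compareTo(o2);
          }
          // Compare directly rather than casting (length2 - length1) to int.
          return length2 < length1 ? -1 : 1;
        } catch (IOException e) {
          throw new RuntimeException(e);
        }
      }
    }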

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/a4601422/src/blur-store/src/test/java/org/apache/blur/store/blockcache/BlockCacheTest.java
----------------------------------------------------------------------
diff --git a/src/blur-store/src/test/java/org/apache/blur/store/blockcache/BlockCacheTest.java b/src/blur-store/src/test/java/org/apache/blur/store/blockcache/BlockCacheTest.java
index 86087b0..98b5c74 100644
--- a/src/blur-store/src/test/java/org/apache/blur/store/blockcache/BlockCacheTest.java
+++ b/src/blur-store/src/test/java/org/apache/blur/store/blockcache/BlockCacheTest.java
@@ -22,25 +22,20 @@ import java.util.Arrays;
 import java.util.Random;
 import java.util.concurrent.atomic.AtomicLong;
 
-import org.apache.blur.metrics.BlurMetrics;
-import org.apache.blur.store.blockcache.BlockCache;
-import org.apache.blur.store.blockcache.BlockCacheKey;
-import org.apache.hadoop.conf.Configuration;
 import org.junit.Test;
 
-
 public class BlockCacheTest {
   @Test
   public void testBlockCache() {
     int blocksInTest = 2000000;
-    int blockSize = 1024;
-
-    int slabSize = blockSize * 4096;
+    int blockSize = BlockCache._8K;
+    int slabSize = blockSize * 1024;
     long totalMemory = 2 * slabSize;
 
-    BlockCache blockCache = new BlockCache(new BlurMetrics(new Configuration()), true, totalMemory, slabSize, blockSize);
-    byte[] buffer = new byte[1024];
+    BlockCache blockCache = new BlockCache(true, totalMemory, slabSize);
+    byte[] buffer = new byte[blockSize];
     Random random = new Random();
+
     byte[] newData = new byte[blockSize];
     AtomicLong hitsInCache = new AtomicLong();
     AtomicLong missesInCache = new AtomicLong();
@@ -64,7 +59,7 @@ public class BlockCacheTest {
 
       byte[] testData = testData(random, blockSize, newData);
       long t1 = System.nanoTime();
-      blockCache.store(blockCacheKey, testData);
+      blockCache.store(blockCacheKey, 0, testData, 0, blockSize);
       storeTime += (System.nanoTime() - t1);
 
       long t3 = System.nanoTime();
@@ -80,6 +75,32 @@ public class BlockCacheTest {
     System.out.println("# of Elements = " + blockCache.getSize());
   }
 
+  /**
+   * Verify that buffer offsets and lengths are checked against the cached block size.
+   */
+  @Test
+  public void testLongBuffer() {
+    Random random = new Random();
+    int blockSize = BlockCache._8K;
+    int slabSize = blockSize * 1024;
+    long totalMemory = 2 * slabSize;
+
+    BlockCache blockCache = new BlockCache(true, totalMemory, slabSize);
+    BlockCacheKey blockCacheKey = new BlockCacheKey();
+    blockCacheKey.setBlock(0);
+    blockCacheKey.setFile(0);
+    byte[] newData = new byte[blockSize*3];
+    byte[] testData = testData(random, blockSize, newData);
+
+    assertTrue(blockCache.store(blockCacheKey, 0, testData, 0, blockSize));
+    assertTrue(blockCache.store(blockCacheKey, 0, testData, blockSize, blockSize));
+    assertTrue(blockCache.store(blockCacheKey, 0, testData, blockSize*2, blockSize));
+
+    assertTrue(blockCache.store(blockCacheKey, 1, testData, 0, blockSize - 1));
+    assertTrue(blockCache.store(blockCacheKey, 1, testData, blockSize, blockSize - 1));
+    assertTrue(blockCache.store(blockCacheKey, 1, testData, blockSize*2, blockSize - 1));
+  }
+
   private static byte[] testData(Random random, int size, byte[] buf) {
     random.nextBytes(buf);
     return buf;
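
The new five-argument store signature seen above lets a caller cache a slice
of a larger buffer: the block offset, source buffer, source offset, and
length are all explicit, and the boolean return reports whether the block was
accepted. A sketch mirroring the calls the new testLongBuffer exercises:

    import org.apache.blur.store.blockcache.BlockCache;
    import org.apache.blur.store.blockcache.BlockCacheKey;

    public class BlockCacheStoreExample {
      public static void main(String[] args) {
        int blockSize = BlockCache._8K;
        int slabSize = blockSize * 1024;
        long totalMemory = 2L * slabSize;
        BlockCache cache = new BlockCache(true, totalMemory, slabSize);

        BlockCacheKey key = new BlockCacheKey();
        key.setFile(0);
        key.setBlock(0);

        // A source buffer three blocks long; store its middle block.
        byte[] data = new byte[blockSize * 3];
        boolean stored = cache.store(key, 0, data, blockSize, blockSize);
        System.out.println(stored);
      }
    }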

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/a4601422/src/blur-store/src/test/java/org/apache/blur/store/blockcache/BlockDirectoryCacheTest.java
----------------------------------------------------------------------
diff --git a/src/blur-store/src/test/java/org/apache/blur/store/blockcache/BlockDirectoryCacheTest.java b/src/blur-store/src/test/java/org/apache/blur/store/blockcache/BlockDirectoryCacheTest.java
new file mode 100644
index 0000000..6a30d10
--- /dev/null
+++ b/src/blur-store/src/test/java/org/apache/blur/store/blockcache/BlockDirectoryCacheTest.java
@@ -0,0 +1,38 @@
+package org.apache.blur.store.blockcache;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import org.junit.Before;
+import org.junit.Test;
+
+public class BlockDirectoryCacheTest {
+  private BlockCache blockCache;
+  private BlockDirectoryCache blockDirectoryCache;
+
+  @Before
+  public void setup() {
+    int slabSize = BlockCache._8K * 1024;
+    long totalMemory = 2 * slabSize;
+    blockCache = new BlockCache(true, totalMemory, slabSize);
+    blockDirectoryCache = new BlockDirectoryCache(blockCache);
+  }
+
+  @Test
+  public void validateEmptyOutputFile() {
+    blockDirectoryCache.renameCacheFile("foo", "bar");
+  }
+}

