hbase-commits mailing list archives

From: mbau...@apache.org
Subject: svn commit: r1293095 - in /hbase/trunk/src: main/java/org/apache/hadoop/hbase/io/hfile/ main/java/org/apache/hadoop/hbase/mapreduce/ main/java/org/apache/hadoop/hbase/regionserver/ main/java/org/apache/hadoop/hbase/util/ test/java/org/apache/hadoop/hba...
Date: Fri, 24 Feb 2012 06:06:54 GMT
Author: mbautin
Date: Fri Feb 24 06:06:53 2012
New Revision: 1293095

URL: http://svn.apache.org/viewvc?rev=1293095&view=rev
Log:
[jira] [HBASE-5442] Use builder pattern in StoreFile and HFile

Summary: Cleaning up the factory method explosion in the HFile writer and StoreFile.
Now, adding a new parameter to HFile/StoreFile writer initialization will not
require modifying factory method invocations all over the codebase.
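
For illustration, a minimal sketch of the call pattern this change enables
(not part of the diff; it assumes an existing Configuration conf, CacheConfig
cacheConf, and FileSystem fs, and the paths, block size, and key count below
are hypothetical):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.io.hfile.CacheConfig;
    import org.apache.hadoop.hbase.io.hfile.Compression;
    import org.apache.hadoop.hbase.io.hfile.HFile;
    import org.apache.hadoop.hbase.regionserver.StoreFile;

    // HFile writer: any option not set explicitly falls back to the
    // factory's default (block size, compression, data block encoder).
    HFile.Writer w = HFile.getWriterFactory(conf, cacheConf)
        .withPath(fs, new Path("/hbase/example.hfile"))   // hypothetical path
        .withBlockSize(64 * 1024)
        .withCompression(Compression.Algorithm.GZ)
        .withComparator(KeyValue.KEY_COMPARATOR)
        .create();

    // StoreFile writer: exactly one of withOutputDir/withFilePath is required.
    StoreFile.Writer sfw = new StoreFile.WriterBuilder(conf, cacheConf, fs, 64 * 1024)
        .withOutputDir(new Path("/hbase/table/region/cf"))  // hypothetical dir
        .withBloomType(StoreFile.BloomType.ROW)
        .withMaxKeyCount(10000)
        .build();

Adding a future writer option then means adding one with* method with a
default value, rather than another overload at every call site.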

Test Plan:
Run unit tests
Deploy to dev cluster and run a load test

Reviewers: JIRA, stack, tedyu, Kannan, Karthik, Liyin

Reviewed By: stack

Differential Revision: https://reviews.facebook.net/D1893

Modified:
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV1.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java
    hbase/trunk/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java
    hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java
    hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java
    hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java
    hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java
    hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFilePerformance.java
    hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java
    hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV2.java
    hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java
    hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java
    hbase/trunk/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java
    hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/CreateRandomStoreFile.java
    hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/HFileReadWriteTest.java
    hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java
    hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java
    hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java
    hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java
    hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
    hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java?rev=1293095&r1=1293094&r2=1293095&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java Fri Feb 24 06:06:53 2012
@@ -38,6 +38,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValue.KeyComparator;
@@ -51,6 +52,8 @@ import org.apache.hadoop.hbase.util.FSUt
 import org.apache.hadoop.io.RawComparator;
 import org.apache.hadoop.io.Writable;
 
+import com.google.common.base.Preconditions;
+
 /**
  * File format for hbase.
  * A file of sorted key/value pairs. Both keys and values are byte arrays.
@@ -232,33 +235,82 @@ public class HFile {
    * we want to be able to swap writer implementations.
    */
   public static abstract class WriterFactory {
-    protected Configuration conf;
-    protected CacheConfig cacheConf;
+    protected final Configuration conf;
+    protected final CacheConfig cacheConf;
+    protected FileSystem fs;
+    protected Path path;
+    protected FSDataOutputStream ostream;
+    protected int blockSize = HColumnDescriptor.DEFAULT_BLOCKSIZE;
+    protected Compression.Algorithm compression =
+        HFile.DEFAULT_COMPRESSION_ALGORITHM;
+    protected HFileDataBlockEncoder encoder = NoOpDataBlockEncoder.INSTANCE;
+    protected KeyComparator comparator;
 
     WriterFactory(Configuration conf, CacheConfig cacheConf) {
       this.conf = conf;
       this.cacheConf = cacheConf;
     }
 
-    public abstract Writer createWriter(FileSystem fs, Path path)
-        throws IOException;
+    public WriterFactory withPath(FileSystem fs, Path path) {
+      Preconditions.checkNotNull(fs);
+      Preconditions.checkNotNull(path);
+      this.fs = fs;
+      this.path = path;
+      return this;
+    }
 
-    public abstract Writer createWriter(FileSystem fs, Path path,
-        int blockSize, Compression.Algorithm compress,
-        HFileDataBlockEncoder dataBlockEncoder,
-        final KeyComparator comparator) throws IOException;
+    public WriterFactory withOutputStream(FSDataOutputStream ostream) {
+      Preconditions.checkNotNull(ostream);
+      this.ostream = ostream;
+      return this;
+    }
 
-    public abstract Writer createWriter(FileSystem fs, Path path,
-        int blockSize, String compress,
-        final KeyComparator comparator) throws IOException;
-
-    public abstract Writer createWriter(final FSDataOutputStream ostream,
-        final int blockSize, final String compress,
-        final KeyComparator comparator) throws IOException;
-
-    public abstract Writer createWriter(final FSDataOutputStream ostream,
-        final int blockSize, final Compression.Algorithm compress,
-        final KeyComparator c) throws IOException;
+    public WriterFactory withBlockSize(int blockSize) {
+      this.blockSize = blockSize;
+      return this;
+    }
+
+    public WriterFactory withCompression(Compression.Algorithm compression) {
+      Preconditions.checkNotNull(compression);
+      this.compression = compression;
+      return this;
+    }
+
+    public WriterFactory withCompression(String compressAlgo) {
+      Preconditions.checkNotNull(compressAlgo);
+      this.compression = AbstractHFileWriter.compressionByName(compressAlgo);
+      return this;
+    }
+
+    public WriterFactory withDataBlockEncoder(HFileDataBlockEncoder encoder) {
+      Preconditions.checkNotNull(encoder);
+      this.encoder = encoder;
+      return this;
+    }
+
+    public WriterFactory withComparator(KeyComparator comparator) {
+      Preconditions.checkNotNull(comparator);
+      this.comparator = comparator;
+      return this;
+    }
+
+    public Writer create() throws IOException {
+      if ((path != null ? 1 : 0) + (ostream != null ? 1 : 0) != 1) {
+        throw new AssertionError("Please specify exactly one of " +
+            "filesystem/path or path");
+      }
+      if (path != null) {
+        ostream = AbstractHFileWriter.createOutputStream(conf, fs, path);
+      }
+      return createWriter(fs, path, ostream, blockSize,
+          compression, encoder, comparator);
+    }
+
+    protected abstract Writer createWriter(FileSystem fs, Path path,
+        FSDataOutputStream ostream, int blockSize,
+        Compression.Algorithm compress,
+        HFileDataBlockEncoder dataBlockEncoder,
+        KeyComparator comparator) throws IOException;
   }
 
   /** The configuration key for HFile version to use for new files */

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV1.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV1.java?rev=1293095&r1=1293094&r2=1293095&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV1.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV1.java Fri Feb 24 06:06:53 2012
@@ -82,102 +82,32 @@ public class HFileWriterV1 extends Abstr
   private int blockNumber = 0;
 
   static class WriterFactoryV1 extends HFile.WriterFactory {
-
     WriterFactoryV1(Configuration conf, CacheConfig cacheConf) {
       super(conf, cacheConf);
     }
 
     @Override
-    public Writer createWriter(FileSystem fs, Path path) throws IOException {
-      return new HFileWriterV1(conf, cacheConf, fs, path);
-    }
-
-    @Override
-    public Writer createWriter(FileSystem fs, Path path, int blockSize,
+    public Writer createWriter(FileSystem fs, Path path,
+        FSDataOutputStream ostream, int blockSize,
         Algorithm compressAlgo, HFileDataBlockEncoder dataBlockEncoder,
         KeyComparator comparator)
         throws IOException {
-      return new HFileWriterV1(conf, cacheConf, fs, path, blockSize,
+      return new HFileWriterV1(conf, cacheConf, fs, path, ostream, blockSize,
           compressAlgo, dataBlockEncoder, comparator);
     }
-
-    @Override
-    public Writer createWriter(FileSystem fs, Path path, int blockSize,
-        String compressAlgoName, KeyComparator comparator)
-        throws IOException {
-      return new HFileWriterV1(conf, cacheConf, fs, path, blockSize,
-          compressAlgoName, comparator);
-    }
-
-    @Override
-    public Writer createWriter(final FSDataOutputStream ostream,
-        final int blockSize, final String compress,
-        final KeyComparator comparator) throws IOException {
-      return new HFileWriterV1(cacheConf, ostream, blockSize, compress,
-          comparator);
-    }
-
-    @Override
-    public Writer createWriter(final FSDataOutputStream ostream,
-        final int blockSize, final Compression.Algorithm compress,
-        final KeyComparator c) throws IOException {
-      return new HFileWriterV1(cacheConf, ostream, blockSize, compress,
-          NoOpDataBlockEncoder.INSTANCE, c);
-    }
-  }
-
-  /** Constructor that uses all defaults for compression and block size. */
-  public HFileWriterV1(Configuration conf, CacheConfig cacheConf,
-      FileSystem fs, Path path)
-      throws IOException {
-    this(conf, cacheConf, fs, path, HFile.DEFAULT_BLOCKSIZE,
-        HFile.DEFAULT_COMPRESSION_ALGORITHM,
-        NoOpDataBlockEncoder.INSTANCE, null);
-  }
-
-  /**
-   * Constructor that takes a path, creates and closes the output stream. Takes
-   * compression algorithm name as string.
-   */
-  public HFileWriterV1(Configuration conf, CacheConfig cacheConf, FileSystem fs,
-      Path path, int blockSize, String compressAlgoName,
-      final KeyComparator comparator) throws IOException {
-    this(conf, cacheConf, fs, path, blockSize,
-        compressionByName(compressAlgoName), NoOpDataBlockEncoder.INSTANCE,
-        comparator);
   }
 
   /** Constructor that takes a path, creates and closes the output stream. */
   public HFileWriterV1(Configuration conf, CacheConfig cacheConf,
-      FileSystem fs, Path path,
+      FileSystem fs, Path path, FSDataOutputStream ostream,
       int blockSize, Compression.Algorithm compress,
       HFileDataBlockEncoder blockEncoder,
       final KeyComparator comparator) throws IOException {
-    super(cacheConf, createOutputStream(conf, fs, path), path,
+    super(cacheConf, ostream == null ? createOutputStream(conf, fs, path) : ostream, path,
         blockSize, compress, blockEncoder, comparator);
     SchemaMetrics.configureGlobally(conf);
   }
 
-  /** Constructor that takes a stream. */
-  public HFileWriterV1(CacheConfig cacheConf,
-      final FSDataOutputStream outputStream, final int blockSize,
-      final String compressAlgoName, final KeyComparator comparator)
-      throws IOException {
-    this(cacheConf, outputStream, blockSize,
-        Compression.getCompressionAlgorithmByName(compressAlgoName),
-        NoOpDataBlockEncoder.INSTANCE, comparator);
-  }
-
-  /** Constructor that takes a stream. */
-  public HFileWriterV1(CacheConfig cacheConf,
-      final FSDataOutputStream outputStream, final int blockSize,
-      final Compression.Algorithm compress,
-      HFileDataBlockEncoder blockEncoder, final KeyComparator comparator)
-      throws IOException {
-    super(cacheConf, outputStream, null, blockSize, compress,
-        blockEncoder, comparator);
-  }
-
   /**
    * If at block boundary, opens new block.
    *

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java?rev=1293095&r1=1293094&r2=1293095&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java Fri Feb 24 06:06:53 2012
@@ -83,100 +83,32 @@ public class HFileWriterV2 extends Abstr
   private long maxMemstoreTS = 0;
 
   static class WriterFactoryV2 extends HFile.WriterFactory {
-
     WriterFactoryV2(Configuration conf, CacheConfig cacheConf) {
       super(conf, cacheConf);
     }
 
     @Override
-    public Writer createWriter(FileSystem fs, Path path)
-        throws IOException {
-      return new HFileWriterV2(conf, cacheConf, fs, path);
-    }
-
-    @Override
-    public Writer createWriter(FileSystem fs, Path path, int blockSize,
+    public Writer createWriter(FileSystem fs, Path path,
+        FSDataOutputStream ostream, int blockSize,
         Compression.Algorithm compress, HFileDataBlockEncoder blockEncoder,
         final KeyComparator comparator) throws IOException {
-      return new HFileWriterV2(conf, cacheConf, fs, path, blockSize,
+      return new HFileWriterV2(conf, cacheConf, fs, path, ostream, blockSize,
           compress, blockEncoder, comparator);
     }
-
-    @Override
-    public Writer createWriter(FileSystem fs, Path path, int blockSize,
-        String compress, final KeyComparator comparator)
-        throws IOException {
-      return new HFileWriterV2(conf, cacheConf, fs, path, blockSize,
-          compress, comparator);
-    }
-
-    @Override
-    public Writer createWriter(final FSDataOutputStream ostream,
-        final int blockSize, final String compress,
-        final KeyComparator comparator) throws IOException {
-      return new HFileWriterV2(conf, cacheConf, ostream, blockSize, compress,
-          comparator);
-    }
-
-    @Override
-    public Writer createWriter(final FSDataOutputStream ostream,
-        final int blockSize, final Compression.Algorithm compress,
-        final KeyComparator c) throws IOException {
-      return new HFileWriterV2(conf, cacheConf, ostream, blockSize, compress,
-          c);
-    }
-  }
-
-  /** Constructor that uses all defaults for compression and block size. */
-  public HFileWriterV2(Configuration conf, CacheConfig cacheConf,
-      FileSystem fs, Path path)
-      throws IOException {
-    this(conf, cacheConf, fs, path, HFile.DEFAULT_BLOCKSIZE,
-        HFile.DEFAULT_COMPRESSION_ALGORITHM, null, null);
-  }
-
-  /**
-   * Constructor that takes a path, creates and closes the output stream. Takes
-   * compression algorithm name as string.
-   */
-  public HFileWriterV2(Configuration conf, CacheConfig cacheConf, FileSystem fs,
-      Path path, int blockSize, String compressAlgoName,
-      final KeyComparator comparator) throws IOException {
-    this(conf, cacheConf, fs, path, blockSize,
-        compressionByName(compressAlgoName), null, comparator);
   }
 
   /** Constructor that takes a path, creates and closes the output stream. */
-  public HFileWriterV2(Configuration conf, CacheConfig cacheConf, FileSystem fs,
-      Path path, int blockSize, Compression.Algorithm compressAlgo,
-      HFileDataBlockEncoder blockEncoder,
+  public HFileWriterV2(Configuration conf, CacheConfig cacheConf,
+      FileSystem fs, Path path, FSDataOutputStream ostream, int blockSize,
+      Compression.Algorithm compressAlgo, HFileDataBlockEncoder blockEncoder,
       final KeyComparator comparator) throws IOException {
-    super(cacheConf, createOutputStream(conf, fs, path), path,
-        blockSize, compressAlgo, blockEncoder, comparator);
+    super(cacheConf,
+        ostream == null ? createOutputStream(conf, fs, path) : ostream,
+        path, blockSize, compressAlgo, blockEncoder, comparator);
     SchemaMetrics.configureGlobally(conf);
     finishInit(conf);
   }
 
-  /** Constructor that takes a stream. */
-  public HFileWriterV2(final Configuration conf, final CacheConfig cacheConf,
-      final FSDataOutputStream outputStream, final int blockSize,
-      final String compressAlgoName, final KeyComparator comparator)
-      throws IOException {
-    this(conf, cacheConf, outputStream, blockSize,
-        Compression.getCompressionAlgorithmByName(compressAlgoName),
-        comparator);
-  }
-
-  /** Constructor that takes a stream. */
-  public HFileWriterV2(final Configuration conf, final CacheConfig cacheConf,
-      final FSDataOutputStream outputStream, final int blockSize,
-      final Compression.Algorithm compress, final KeyComparator comparator)
-      throws IOException {
-    super(cacheConf, outputStream, null, blockSize, compress, null,
-        comparator);
-    finishInit(conf);
-  }
-
   /** Additional initialization steps */
   private void finishInit(final Configuration conf) {
     if (fsBlockWriter != null)

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java?rev=1293095&r1=1293094&r2=1293095&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java Fri Feb 24 06:06:53 2012
@@ -173,10 +173,12 @@ public class HFileOutputFormat extends F
         Path familydir = new Path(outputdir, Bytes.toString(family));
         String compression = compressionMap.get(family);
         compression = compression == null ? defaultCompression : compression;
-        wl.writer =
-          HFile.getWriterFactoryNoCache(conf).createWriter(fs,
-          StoreFile.getUniqueFile(fs, familydir), blocksize,
-          compression, KeyValue.KEY_COMPARATOR);
+        wl.writer = HFile.getWriterFactoryNoCache(conf)
+            .withPath(fs, StoreFile.getUniqueFile(fs, familydir))
+            .withBlockSize(blocksize)
+            .withCompression(compression)
+            .withComparator(KeyValue.KEY_COMPARATOR)
+            .create();
         this.writers.put(family, wl);
         return wl;
       }

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java?rev=1293095&r1=1293094&r2=1293095&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java Fri Feb 24 06:06:53 2012
@@ -543,10 +543,13 @@ public class LoadIncrementalHFiles exten
       Algorithm compression = familyDescriptor.getCompression();
       BloomType bloomFilterType = familyDescriptor.getBloomFilterType();
 
-      halfWriter = new StoreFile.Writer(
-          fs, outFile, blocksize, compression, dataBlockEncoder,
-          conf, cacheConf,
-          KeyValue.COMPARATOR, bloomFilterType, 0);
+      halfWriter = new StoreFile.WriterBuilder(conf, cacheConf,
+          fs, blocksize)
+              .withFilePath(outFile)
+              .withCompression(compression)
+              .withDataBlockEncoder(dataBlockEncoder)
+              .withBloomType(bloomFilterType)
+              .build();
       HFileScanner scanner = halfReader.getScanner(false, false, false);
       scanner.seekTo();
       do {

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java?rev=1293095&r1=1293094&r2=1293095&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java Fri Feb 24 06:06:53 2012
@@ -790,9 +790,14 @@ public class Store extends SchemaConfigu
     } else {
       writerCacheConf = cacheConf;
     }
-    StoreFile.Writer w = StoreFile.createWriter(fs, region.getTmpDir(),
-        blocksize, compression, dataBlockEncoder, comparator, conf,
-        writerCacheConf, family.getBloomFilterType(), maxKeyCount);
+    StoreFile.Writer w = new StoreFile.WriterBuilder(conf, writerCacheConf,
+        fs, blocksize)
+            .withOutputDir(region.getTmpDir())
+            .withDataBlockEncoder(dataBlockEncoder)
+            .withComparator(comparator)
+            .withBloomType(family.getBloomFilterType())
+            .withMaxKeyCount(maxKeyCount)
+            .build();
     // The store file writer's path does not include the CF name, so we need
     // to configure the HFile writer directly.
     SchemaConfigured sc = (SchemaConfigured) w.writer;

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java?rev=1293095&r1=1293094&r2=1293095&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java Fri Feb 24 06:06:53 2012
@@ -69,13 +69,14 @@ import org.apache.hadoop.io.RawComparato
 import org.apache.hadoop.io.WritableUtils;
 
 import com.google.common.base.Function;
+import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Ordering;
 
 /**
  * A Store data file.  Stores usually have one or more of these files.  They
  * are produced by flushing the memstore to disk.  To
- * create, call {@link #createWriter(FileSystem, Path, int, Configuration, CacheConfig)}
+ * create, instantiate a writer using {@link StoreFile.WriterBuilder}
  * and append data. Be sure to add any metadata before calling close on the
  * Writer (Use the appendMetadata convenience methods). On close, a StoreFile
  * is sitting in the Filesystem.  To refer to it, create a StoreFile instance
@@ -681,64 +682,122 @@ public class StoreFile extends SchemaCon
     return tgt;
   }
 
-  /**
-   * Get a store file writer. Client is responsible for closing file when done.
-   *
-   * @param fs
-   * @param dir Path to family directory.  Makes the directory if doesn't exist.
-   * Creates a file with a unique name in this directory.
-   * @param blocksize size per filesystem block
-   * @return StoreFile.Writer
-   * @throws IOException
-   */
-  public static Writer createWriter(final FileSystem fs, final Path dir,
-      final int blocksize, Configuration conf, CacheConfig cacheConf)
-  throws IOException {
-    return createWriter(fs, dir, blocksize, null, NoOpDataBlockEncoder.INSTANCE,
-        null, conf, cacheConf, BloomType.NONE, 0);
-  }
+  public static class WriterBuilder {
+    private final Configuration conf;
+    private final CacheConfig cacheConf;
+    private final FileSystem fs;
+    private final int blockSize;
+
+    private Compression.Algorithm compressAlgo =
+        HFile.DEFAULT_COMPRESSION_ALGORITHM;
+    private HFileDataBlockEncoder dataBlockEncoder =
+        NoOpDataBlockEncoder.INSTANCE;
+    private KeyValue.KVComparator comparator = KeyValue.COMPARATOR;
+    private BloomType bloomType = BloomType.NONE;
+    private long maxKeyCount = 0;
+    private Path dir;
+    private Path filePath;
+
+    public WriterBuilder(Configuration conf, CacheConfig cacheConf,
+        FileSystem fs, int blockSize) {
+      this.conf = conf;
+      this.cacheConf = cacheConf;
+      this.fs = fs;
+      this.blockSize = blockSize;
+    }
 
-  /**
-   * Create a store file writer. Client is responsible for closing file when done.
-   * If metadata, add BEFORE closing using appendMetadata()
-   * @param fs
-   * @param dir Path to family directory.  Makes the directory if doesn't exist.
-   * Creates a file with a unique name in this directory.
-   * @param blocksize
-   * @param compressAlgo Compression algorithm. Pass null to get default.
-   * @param dataBlockEncoder Pass null to disable data block encoding.
-   * @param comparator Key-value comparator. Pass null to get default.
-   * @param conf HBase system configuration. used with bloom filters
-   * @param cacheConf Cache configuration and reference.
-   * @param bloomType column family setting for bloom filters
-   * @param maxKeyCount estimated maximum number of keys we expect to add
-   * @return HFile.Writer
-   * @throws IOException
-   */
-  public static StoreFile.Writer createWriter(final FileSystem fs,
-      final Path dir, final int blocksize,
-      Compression.Algorithm compressAlgo,
-      final HFileDataBlockEncoder dataBlockEncoder,
-      KeyValue.KVComparator comparator, final Configuration conf,
-      final CacheConfig cacheConf, BloomType bloomType, long maxKeyCount)
-      throws IOException {
+    /**
+     * Use either this method or {@link #withFilePath}, but not both.
+     * @param dir Path to column family directory. The directory is created
+     *          if it does not exist. The file is given a unique name within this
+     *          directory.
+     * @return this (for chained invocation)
+     */
+    public WriterBuilder withOutputDir(Path dir) {
+      Preconditions.checkNotNull(dir);
+      this.dir = dir;
+      return this;
+    }
+
+    /**
+     * Use either this method or {@link #withOutputDir}, but not both.
+     * @param filePath the StoreFile path to write
+     * @return this (for chained invocation)
+     */
+    public WriterBuilder withFilePath(Path filePath) {
+      Preconditions.checkNotNull(filePath);
+      this.filePath = filePath;
+      return this;
+    }
 
-    if (!fs.exists(dir)) {
-      fs.mkdirs(dir);
+    public WriterBuilder withCompression(Compression.Algorithm compressAlgo) {
+      Preconditions.checkNotNull(compressAlgo);
+      this.compressAlgo = compressAlgo;
+      return this;
     }
-    Path path = getUniqueFile(fs, dir);
-    if (!BloomFilterFactory.isGeneralBloomEnabled(conf)) {
-      bloomType = BloomType.NONE;
+
+    public WriterBuilder withDataBlockEncoder(HFileDataBlockEncoder encoder) {
+      Preconditions.checkNotNull(encoder);
+      this.dataBlockEncoder = encoder;
+      return this;
     }
 
-    if (compressAlgo == null) {
-      compressAlgo = HFile.DEFAULT_COMPRESSION_ALGORITHM;
+    public WriterBuilder withComparator(KeyValue.KVComparator comparator) {
+      Preconditions.checkNotNull(comparator);
+      this.comparator = comparator;
+      return this;
     }
-    if (comparator == null) {
-      comparator = KeyValue.COMPARATOR;
+
+    public WriterBuilder withBloomType(BloomType bloomType) {
+      Preconditions.checkNotNull(bloomType);
+      this.bloomType = bloomType;
+      return this;
+    }
+
+    /**
+     * @param maxKeyCount estimated maximum number of keys we expect to add
+     * @return this (for chained invocation)
+     */
+    public WriterBuilder withMaxKeyCount(long maxKeyCount) {
+      this.maxKeyCount = maxKeyCount;
+      return this;
+    }
+
+    /**
+     * Create a store file writer. Client is responsible for closing file when
+     * done. If adding metadata, do so BEFORE closing, using
+     * {@link Writer#appendMetadata}.
+     */
+    public Writer build() throws IOException {
+      if ((dir == null ? 0 : 1) + (filePath == null ? 0 : 1) != 1) {
+        throw new IllegalArgumentException("Specify exactly one of parent " +
+            "directory or file path");
+      }
+
+      if (dir == null) {
+        dir = filePath.getParent();
+      }
+
+      if (!fs.exists(dir)) {
+        fs.mkdirs(dir);
+      }
+
+      if (filePath == null) {
+        filePath = getUniqueFile(fs, dir);
+        if (!BloomFilterFactory.isGeneralBloomEnabled(conf)) {
+          bloomType = BloomType.NONE;
+        }
+      }
+
+      if (compressAlgo == null) {
+        compressAlgo = HFile.DEFAULT_COMPRESSION_ALGORITHM;
+      }
+      if (comparator == null) {
+        comparator = KeyValue.COMPARATOR;
+      }
+      return new Writer(fs, filePath, blockSize, compressAlgo, dataBlockEncoder,
+          conf, cacheConf, comparator, bloomType, maxKeyCount);
     }
-    return new Writer(fs, path, blocksize, compressAlgo, dataBlockEncoder,
-        conf, cacheConf, comparator, bloomType, maxKeyCount);
   }
 
   /**
@@ -845,6 +904,7 @@ public class StoreFile extends SchemaCon
     boolean isTimeRangeTrackerSet = false;
 
     protected HFile.Writer writer;
+
     /**
      * Creates an HFile.Writer that also write helpful meta data.
      * @param fs file system to write to
@@ -858,7 +918,7 @@ public class StoreFile extends SchemaCon
      *        for Bloom filter size in {@link HFile} format version 1.
      * @throws IOException problem writing to FS
      */
-    public Writer(FileSystem fs, Path path, int blocksize,
+    private Writer(FileSystem fs, Path path, int blocksize,
         Compression.Algorithm compress,
         HFileDataBlockEncoder dataBlockEncoder, final Configuration conf,
         CacheConfig cacheConf,
@@ -866,9 +926,13 @@ public class StoreFile extends SchemaCon
         throws IOException {
       this.dataBlockEncoder = dataBlockEncoder != null ?
           dataBlockEncoder : NoOpDataBlockEncoder.INSTANCE;
-      writer = HFile.getWriterFactory(conf, cacheConf).createWriter(
-          fs, path, blocksize,
-          compress, this.dataBlockEncoder, comparator.getRawComparator());
+      writer = HFile.getWriterFactory(conf, cacheConf)
+          .withPath(fs, path)
+          .withBlockSize(blocksize)
+          .withCompression(compress)
+          .withDataBlockEncoder(dataBlockEncoder)
+          .withComparator(comparator.getRawComparator())
+          .create();
 
       this.kvComparator = comparator;
 

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java?rev=1293095&r1=1293094&r2=1293095&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java Fri Feb 24 06:06:53 2012
@@ -105,9 +105,10 @@ public class CompressionTest {
   public static void doSmokeTest(FileSystem fs, Path path, String codec)
   throws Exception {
     Configuration conf = HBaseConfiguration.create();
-    HFile.Writer writer =
-      HFile.getWriterFactoryNoCache(conf).createWriter(
-      fs, path, HFile.DEFAULT_BLOCKSIZE, codec, null);
+    HFile.Writer writer = HFile.getWriterFactoryNoCache(conf)
+        .withPath(fs, path)
+        .withCompression(codec)
+        .create();
     writer.append(Bytes.toBytes("testkey"), Bytes.toBytes("testval"));
     writer.appendFileInfo(Bytes.toBytes("infokey"), Bytes.toBytes("infoval"));
     writer.close();

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java?rev=1293095&r1=1293094&r2=1293095&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java Fri Feb 24 06:06:53 2012
@@ -190,9 +190,10 @@ public class HFilePerformanceEvaluation 
     @Override
     void setUp() throws Exception {
       writer =
-        HFile.getWriterFactoryNoCache(conf).createWriter(this.fs,
-            this.mf, RFILE_BLOCKSIZE, (Compression.Algorithm) null, null,
-            null);
+        HFile.getWriterFactoryNoCache(conf)
+            .withPath(fs, mf)
+            .withBlockSize(RFILE_BLOCKSIZE)
+            .create();
     }
 
     @Override

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java?rev=1293095&r1=1293094&r2=1293095&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java Fri Feb 24 06:06:53 2012
@@ -67,9 +67,11 @@ public class TestHalfStoreFileReader {
     FileSystem fs = FileSystem.get(conf);
     CacheConfig cacheConf = new CacheConfig(conf);
 
-    HFile.Writer w =
-      HFile.getWriterFactory(conf, cacheConf).createWriter(fs, p, 1024,
-        "none", KeyValue.KEY_COMPARATOR);
+    HFile.Writer w = HFile.getWriterFactory(conf, cacheConf)
+        .withPath(fs, p)
+        .withBlockSize(1024)
+        .withComparator(KeyValue.KEY_COMPARATOR)
+        .create();
 
     // write some things.
     List<KeyValue> items = genSomeKeys();

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java?rev=1293095&r1=1293094&r2=1293095&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java Fri Feb 24 06:06:53 2012
@@ -284,9 +284,15 @@ public class TestCacheOnWrite {
   public void writeStoreFile() throws IOException {
     Path storeFileParentDir = new Path(TEST_UTIL.getDataTestDir(),
         "test_cache_on_write");
-    StoreFile.Writer sfw = StoreFile.createWriter(fs, storeFileParentDir,
-        DATA_BLOCK_SIZE, compress, encoder, KeyValue.COMPARATOR, conf,
-        cacheConf, BLOOM_TYPE, NUM_KV);
+    StoreFile.Writer sfw = new StoreFile.WriterBuilder(conf, cacheConf, fs,
+        DATA_BLOCK_SIZE)
+            .withOutputDir(storeFileParentDir)
+            .withCompression(compress)
+            .withDataBlockEncoder(encoder)
+            .withComparator(KeyValue.COMPARATOR)
+            .withBloomType(BLOOM_TYPE)
+            .withMaxKeyCount(NUM_KV)
+            .build();
 
     final int rowLen = 32;
     for (int i = 0; i < NUM_KV; ++i) {

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java?rev=1293095&r1=1293094&r2=1293095&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java Fri Feb 24 06:06:53 2012
@@ -80,7 +80,8 @@ public class TestHFile extends HBaseTest
   public void testEmptyHFile() throws IOException {
     if (cacheConf == null) cacheConf = new CacheConfig(conf);
     Path f = new Path(ROOT_DIR, getName());
-    Writer w = HFile.getWriterFactory(conf, cacheConf).createWriter(this.fs, f);
+    Writer w =
+        HFile.getWriterFactory(conf, cacheConf).withPath(fs, f).create();
     w.close();
     Reader r = HFile.createReader(fs, f, cacheConf);
     r.loadFileInfo();
@@ -152,8 +153,11 @@ public class TestHFile extends HBaseTest
     if (cacheConf == null) cacheConf = new CacheConfig(conf);
     Path ncTFile = new Path(ROOT_DIR, "basic.hfile." + codec.toString());
     FSDataOutputStream fout = createFSOutput(ncTFile);
-    Writer writer = HFile.getWriterFactory(conf, cacheConf).createWriter(fout,
-        minBlockSize, Compression.getCompressionAlgorithmByName(codec), null);
+    Writer writer = HFile.getWriterFactory(conf, cacheConf)
+        .withOutputStream(fout)
+        .withBlockSize(minBlockSize)
+        .withCompression(codec)
+        .create();
     LOG.info(writer);
     writeRecords(writer);
     fout.close();
@@ -229,9 +233,11 @@ public class TestHFile extends HBaseTest
     if (cacheConf == null) cacheConf = new CacheConfig(conf);
     Path mFile = new Path(ROOT_DIR, "meta.hfile");
     FSDataOutputStream fout = createFSOutput(mFile);
-    Writer writer = HFile.getWriterFactory(conf, cacheConf).createWriter(fout,
-        minBlockSize, Compression.getCompressionAlgorithmByName(compress),
-        null);
+    Writer writer = HFile.getWriterFactory(conf, cacheConf)
+        .withOutputStream(fout)
+        .withBlockSize(minBlockSize)
+        .withCompression(compress)
+        .create();
     someTestingWithMetaBlock(writer);
     writer.close();
     fout.close();
@@ -259,8 +265,11 @@ public class TestHFile extends HBaseTest
         HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
       Path mFile = new Path(ROOT_DIR, "nometa_" + compressAlgo + ".hfile");
       FSDataOutputStream fout = createFSOutput(mFile);
-      Writer writer = HFile.getWriterFactory(conf, cacheConf).createWriter(fout,
-          minBlockSize, compressAlgo, null);
+      Writer writer = HFile.getWriterFactory(conf, cacheConf)
+          .withOutputStream(fout)
+          .withBlockSize(minBlockSize)
+          .withCompression(compressAlgo)
+          .create();
       writer.append("foo".getBytes(), "value".getBytes());
       writer.close();
       fout.close();
@@ -283,19 +292,22 @@ public class TestHFile extends HBaseTest
     if (cacheConf == null) cacheConf = new CacheConfig(conf);
     Path mFile = new Path(ROOT_DIR, "meta.tfile");
     FSDataOutputStream fout = createFSOutput(mFile);
-    Writer writer = HFile.getWriterFactory(conf, cacheConf).createWriter(fout,
-      minBlockSize, (Compression.Algorithm) null, new KeyComparator() {
-        @Override
-        public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2,
-            int l2) {
-          return -Bytes.compareTo(b1, s1, l1, b2, s2, l2);
-
-        }
-        @Override
-        public int compare(byte[] o1, byte[] o2) {
-          return compare(o1, 0, o1.length, o2, 0, o2.length);
-        }
-      });
+    KeyComparator comparator = new KeyComparator() {
+      @Override
+      public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2,
+          int l2) {
+        return -Bytes.compareTo(b1, s1, l1, b2, s2, l2);
+      }
+      @Override
+      public int compare(byte[] o1, byte[] o2) {
+        return compare(o1, 0, o1.length, o2, 0, o2.length);
+      }
+    };
+    Writer writer = HFile.getWriterFactory(conf, cacheConf)
+        .withOutputStream(fout)
+        .withBlockSize(minBlockSize)
+        .withComparator(comparator)
+        .create();
     writer.append("3".getBytes(), "0".getBytes());
     writer.append("2".getBytes(), "0".getBytes());
     writer.append("1".getBytes(), "0".getBytes());

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java?rev=1293095&r1=1293094&r2=1293095&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java Fri Feb 24 06:06:53 2012
@@ -483,8 +483,12 @@ public class TestHFileBlockIndex {
       // Write the HFile
       {
         HFile.Writer writer =
-          HFile.getWriterFactory(conf, cacheConf).createWriter(fs,
-            hfilePath, SMALL_BLOCK_SIZE, compr, null, KeyValue.KEY_COMPARATOR);
+            HFile.getWriterFactory(conf, cacheConf)
+                .withPath(fs, hfilePath)
+                .withBlockSize(SMALL_BLOCK_SIZE)
+                .withCompression(compr)
+                .withComparator(KeyValue.KEY_COMPARATOR)
+                .create();
         Random rand = new Random(19231737);
 
         for (int i = 0; i < NUM_KV; ++i) {

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFilePerformance.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFilePerformance.java?rev=1293095&r1=1293094&r2=1293095&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFilePerformance.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFilePerformance.java Fri Feb 24 06:06:53 2012
@@ -161,9 +161,11 @@ public class TestHFilePerformance extend
 
     if ("HFile".equals(fileType)){
         System.out.println("HFile write method: ");
-        HFile.Writer writer =
-          HFile.getWriterFactoryNoCache(conf).createWriter(fout,
-             minBlockSize, codecName, null);
+        HFile.Writer writer = HFile.getWriterFactoryNoCache(conf)
+            .withOutputStream(fout)
+            .withBlockSize(minBlockSize)
+            .withCompression(codecName)
+            .create();
 
         // Writing value in one shot.
         for (long l=0; l<rows; l++ ) {

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java?rev=1293095&r1=1293094&r2=1293095&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java Fri Feb 24 06:06:53 2012
@@ -121,9 +121,11 @@ public class TestHFileSeek extends TestC
     long totalBytes = 0;
     FSDataOutputStream fout = createFSOutput(path, fs);
     try {
-      Writer writer =
-        HFile.getWriterFactoryNoCache(conf).createWriter(fout,
-          options.minBlockSize, options.compress, null);
+      Writer writer = HFile.getWriterFactoryNoCache(conf)
+          .withOutputStream(fout)
+          .withBlockSize(options.minBlockSize)
+          .withCompression(options.compress)
+          .create();
       try {
         BytesWritable key = new BytesWritable();
         BytesWritable val = new BytesWritable();

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV2.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV2.java?rev=1293095&r1=1293094&r2=1293095&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV2.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV2.java Fri Feb 24 06:06:53 2012
@@ -75,8 +75,13 @@ public class TestHFileWriterV2 {
         "testHFileFormatV2");
 
     final Compression.Algorithm COMPRESS_ALGO = Compression.Algorithm.GZ;
-    HFileWriterV2 writer = new HFileWriterV2(conf, new CacheConfig(conf), fs,
-        hfilePath, 4096, COMPRESS_ALGO, null, KeyValue.KEY_COMPARATOR);
+    HFileWriterV2 writer = (HFileWriterV2)
+        new HFileWriterV2.WriterFactoryV2(conf, new CacheConfig(conf))
+            .withPath(fs, hfilePath)
+            .withBlockSize(4096)
+            .withCompression(COMPRESS_ALGO)
+            .withComparator(KeyValue.KEY_COMPARATOR)
+            .create();
 
     long totalKeyLength = 0;
     long totalValueLength = 0;

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java?rev=1293095&r1=1293094&r2=1293095&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java Fri Feb 24 06:06:53 2012
@@ -47,8 +47,10 @@ public class TestReseekTo {
     FSDataOutputStream fout = TEST_UTIL.getTestFileSystem().create(ncTFile);
     CacheConfig cacheConf = new CacheConfig(TEST_UTIL.getConfiguration());
     HFile.Writer writer = HFile.getWriterFactory(
-        TEST_UTIL.getConfiguration(), cacheConf).createWriter(
-            fout, 4000, "none", null);
+        TEST_UTIL.getConfiguration(), cacheConf)
+            .withOutputStream(fout)
+            .withBlockSize(4000)
+            .create();
     int numberOfKeys = 1000;
 
     String valueString = "Value";

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java?rev=1293095&r1=1293094&r2=1293095&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java Fri Feb 24 06:06:53 2012
@@ -46,9 +46,10 @@ public class TestSeekTo extends HBaseTes
     Path ncTFile = new Path(this.testDir, "basic.hfile");
     FSDataOutputStream fout = this.fs.create(ncTFile);
     int blocksize = toKV("a").getLength() * 3;
-    HFile.Writer writer =
-      HFile.getWriterFactoryNoCache(conf).createWriter(fout,
-        blocksize, "none", null);
+    HFile.Writer writer = HFile.getWriterFactoryNoCache(conf)
+        .withOutputStream(fout)
+        .withBlockSize(blocksize)
+        .create();
     // 4 bytes * 3 * 2 for each key/value +
     // 3 for keys, 15 for values = 42 (woot)
     writer.append(toKV("c"));

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java?rev=1293095&r1=1293094&r2=1293095&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java Fri Feb 24 06:06:53 2012
@@ -207,10 +207,12 @@ public class TestLoadIncrementalHFiles {
       byte[] family, byte[] qualifier,
       byte[] startKey, byte[] endKey, int numRows) throws IOException
   {
-    HFile.Writer writer =
-      HFile.getWriterFactory(conf, new CacheConfig(conf)).createWriter(fs, path,
-        BLOCKSIZE, COMPRESSION,
-        KeyValue.KEY_COMPARATOR);
+    HFile.Writer writer = HFile.getWriterFactory(conf, new CacheConfig(conf))
+        .withPath(fs, path)
+        .withBlockSize(BLOCKSIZE)
+        .withCompression(COMPRESSION)
+        .withComparator(KeyValue.KEY_COMPARATOR)
+        .create();
     long now = System.currentTimeMillis();
     try {
       // subtract 2 since iterateOnSplits doesn't include boundary keys

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/CreateRandomStoreFile.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/CreateRandomStoreFile.java?rev=1293095&r1=1293094&r2=1293095&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/CreateRandomStoreFile.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/CreateRandomStoreFile.java Fri Feb 24 06:06:53 2012
@@ -183,9 +183,13 @@ public class CreateRandomStoreFile {
           Integer.valueOf(cmdLine.getOptionValue(INDEX_BLOCK_SIZE_OPTION)));
     }
 
-    StoreFile.Writer sfw = StoreFile.createWriter(fs, outputDir, blockSize,
-        compr, null, KeyValue.COMPARATOR, conf, new CacheConfig(conf),
-        bloomType, numKV);
+    StoreFile.Writer sfw = new StoreFile.WriterBuilder(conf,
+        new CacheConfig(conf), fs, blockSize)
+            .withOutputDir(outputDir)
+            .withCompression(compr)
+            .withBloomType(bloomType)
+            .withMaxKeyCount(numKV)
+            .build();
 
     rand = new Random();
     LOG.info("Writing " + numKV + " key/value pairs");

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/HFileReadWriteTest.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/HFileReadWriteTest.java?rev=1293095&r1=1293094&r2=1293095&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/HFileReadWriteTest.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/HFileReadWriteTest.java Fri Feb 24 06:06:53 2012
@@ -349,11 +349,14 @@ public class HFileReadWriteTest {
         null);
     Store store = new Store(outputDir, region, columnDescriptor, fs, conf);
 
-    StoreFile.Writer writer =
-        StoreFile.createWriter(fs, outputDir, blockSize, compression,
-            dataBlockEncoder, KeyValue.COMPARATOR, this.conf,
-            new CacheConfig(conf), bloomType,
-            maxKeyCount);
+    StoreFile.Writer writer = new StoreFile.WriterBuilder(conf,
+        new CacheConfig(conf), fs, blockSize)
+            .withOutputDir(outputDir)
+            .withCompression(compression)
+            .withDataBlockEncoder(dataBlockEncoder)
+            .withBloomType(bloomType)
+            .withMaxKeyCount(maxKeyCount)
+            .build();
 
     StatisticsPrinter statsPrinter = new StatisticsPrinter();
     statsPrinter.startThread();

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java?rev=1293095&r1=1293094&r2=1293095&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java Fri Feb 24 06:06:53 2012
@@ -294,9 +294,11 @@ public class TestCompoundBloomFilter {
     conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, true);
     cacheConf = new CacheConfig(conf);
 
-    StoreFile.Writer w = StoreFile.createWriter(fs,
-        TEST_UTIL.getDataTestDir(), BLOCK_SIZES[t], null, null, null, conf,
-        cacheConf, bt, 0);
+    StoreFile.Writer w = new StoreFile.WriterBuilder(conf, cacheConf, fs,
+        BLOCK_SIZES[t])
+            .withOutputDir(TEST_UTIL.getDataTestDir())
+            .withBloomType(bt)
+            .build();
 
     assertTrue(w.hasGeneralBloom());
     assertTrue(w.getGeneralBloomWriter() instanceof CompoundBloomFilterWriter);

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java?rev=1293095&r1=1293094&r2=1293095&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java Fri Feb 24 06:06:53 2012
@@ -71,8 +71,10 @@ public class TestFSErrorsExposed {
         "regionname"), "familyname");
     FaultyFileSystem fs = new FaultyFileSystem(util.getTestFileSystem());
     CacheConfig cacheConf = new CacheConfig(util.getConfiguration());
-    StoreFile.Writer writer = StoreFile.createWriter(fs, hfilePath, 2*1024,
-        util.getConfiguration(), cacheConf);
+    StoreFile.Writer writer = new StoreFile.WriterBuilder(
+        util.getConfiguration(), cacheConf, fs, 2*1024)
+            .withOutputDir(hfilePath)
+            .build();
     TestStoreFile.writeStoreFile(
         writer, Bytes.toBytes("cf"), Bytes.toBytes("qual"));
 
@@ -116,8 +118,10 @@ public class TestFSErrorsExposed {
         "regionname"), "familyname");
     FaultyFileSystem fs = new FaultyFileSystem(util.getTestFileSystem());
     CacheConfig cacheConf = new CacheConfig(util.getConfiguration());
-    StoreFile.Writer writer = StoreFile.createWriter(fs, hfilePath, 2 * 1024,
-        util.getConfiguration(), cacheConf);
+    StoreFile.Writer writer = new StoreFile.WriterBuilder(
+        util.getConfiguration(), cacheConf, fs, 2 * 1024)
+            .withOutputDir(hfilePath)
+            .build();
     TestStoreFile.writeStoreFile(
         writer, Bytes.toBytes("cf"), Bytes.toBytes("qual"));
 

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java?rev=1293095&r1=1293094&r2=1293095&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java Fri Feb 24 06:06:53 2012
@@ -82,7 +82,11 @@ public class TestHRegionServerBulkLoad {
       byte[] qualifier, byte[] value, int numRows) throws IOException {
     HFile.Writer writer = HFile
         .getWriterFactory(conf, new CacheConfig(conf))
-        .createWriter(fs, path, BLOCKSIZE, COMPRESSION, KeyValue.KEY_COMPARATOR);
+        .withPath(fs, path)
+        .withBlockSize(BLOCKSIZE)
+        .withCompression(COMPRESSION)
+        .withComparator(KeyValue.KEY_COMPARATOR)
+        .create();
     long now = System.currentTimeMillis();
     try {
       // subtract 2 since iterateOnSplits doesn't include boundary keys

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java?rev=1293095&r1=1293094&r2=1293095&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java Fri Feb 24 06:06:53 2012
@@ -265,8 +265,10 @@ public class TestStore extends TestCase 
     long seqid = f.getMaxSequenceId();
     Configuration c = HBaseConfiguration.create();
     FileSystem fs = FileSystem.get(c);
-    StoreFile.Writer w = StoreFile.createWriter(fs, storedir,
-        StoreFile.DEFAULT_BLOCKSIZE_SMALL, c, new CacheConfig(c));
+    StoreFile.Writer w = new StoreFile.WriterBuilder(c, new CacheConfig(c),
+        fs, StoreFile.DEFAULT_BLOCKSIZE_SMALL)
+            .withOutputDir(storedir)
+            .build();
     w.appendMetadata(seqid + 1, false);
     w.close();
     this.store.close();

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java?rev=1293095&r1=1293094&r2=1293095&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java Fri Feb 24 06:06:53 2012
@@ -89,9 +89,12 @@ public class TestStoreFile extends HBase
    */
   public void testBasicHalfMapFile() throws Exception {
     // Make up a directory hierarchy that has a regiondir and familyname.
-    StoreFile.Writer writer = StoreFile.createWriter(this.fs,
-      new Path(new Path(this.testDir, "regionname"), "familyname"), 2 * 1024,
-      conf, cacheConf);
+    Path outputDir = new Path(new Path(this.testDir, "regionname"),
+        "familyname");
+    StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf,
+        this.fs, 2 * 1024)
+            .withOutputDir(outputDir)
+            .build();
     writeStoreFile(writer);
     checkHalfHFile(new StoreFile(this.fs, writer.getPath(), conf, cacheConf,
         StoreFile.BloomType.NONE, NoOpDataBlockEncoder.INSTANCE));
@@ -131,8 +134,10 @@ public class TestStoreFile extends HBase
     Path storedir = new Path(new Path(this.testDir, "regionname"), "familyname");
     Path dir = new Path(storedir, "1234567890");
     // Make a store file and write data to it.
-    StoreFile.Writer writer = StoreFile.createWriter(this.fs, dir, 8 * 1024,
-        conf, cacheConf);
+    StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf,
+        this.fs, 8 * 1024)
+            .withOutputDir(dir)
+            .build();
     writeStoreFile(writer);
     StoreFile hsf = new StoreFile(this.fs, writer.getPath(), conf, cacheConf,
         StoreFile.BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
@@ -391,10 +396,12 @@ public class TestStoreFile extends HBase
 
     // write the file
     Path f = new Path(ROOT_DIR, getName());
-    StoreFile.Writer writer =
-        new StoreFile.Writer(fs, f, StoreFile.DEFAULT_BLOCKSIZE_SMALL,
-            HFile.DEFAULT_COMPRESSION_ALGORITHM, null, conf, cacheConf,
-            KeyValue.COMPARATOR, StoreFile.BloomType.ROW, 2000);
+    StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, fs,
+        StoreFile.DEFAULT_BLOCKSIZE_SMALL)
+            .withFilePath(f)
+            .withBloomType(StoreFile.BloomType.ROW)
+            .withMaxKeyCount(2000)
+            .build();
     bloomWriteRead(writer, fs);
   }
 
@@ -409,10 +416,11 @@ public class TestStoreFile extends HBase
     // write the file
     Path f = new Path(ROOT_DIR, getName());
 
-    StoreFile.Writer writer = new StoreFile.Writer(fs, f,
-        StoreFile.DEFAULT_BLOCKSIZE_SMALL, HFile.DEFAULT_COMPRESSION_ALGORITHM,
-        null, conf, cacheConf, KeyValue.COMPARATOR, StoreFile.BloomType.NONE,
-        2000);
+    StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf,
+        fs, StoreFile.DEFAULT_BLOCKSIZE_SMALL)
+            .withFilePath(f)
+            .withMaxKeyCount(2000)
+            .build();
 
     // add delete family
     long now = System.currentTimeMillis();
@@ -477,10 +485,12 @@ public class TestStoreFile extends HBase
     for (int x : new int[]{0,1}) {
       // write the file
       Path f = new Path(ROOT_DIR, getName() + x);
-      StoreFile.Writer writer = new StoreFile.Writer(fs, f,
-          StoreFile.DEFAULT_BLOCKSIZE_SMALL,
-          HFile.DEFAULT_COMPRESSION_ALGORITHM,
-          null, conf, cacheConf, KeyValue.COMPARATOR, bt[x], expKeys[x]);
+      StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf,
+          fs, StoreFile.DEFAULT_BLOCKSIZE_SMALL)
+              .withFilePath(f)
+              .withBloomType(bt[x])
+              .withMaxKeyCount(expKeys[x])
+              .build();
 
       long now = System.currentTimeMillis();
       for (int i = 0; i < rowCount*2; i += 2) { // rows
@@ -550,10 +560,12 @@ public class TestStoreFile extends HBase
     conf.setInt(HFile.FORMAT_VERSION_KEY, 1);
 
     // this should not create a bloom because the max keys is too small
-    StoreFile.Writer writer = new StoreFile.Writer(fs, f,
-        StoreFile.DEFAULT_BLOCKSIZE_SMALL, HFile.DEFAULT_COMPRESSION_ALGORITHM,
-        null, conf, cacheConf, KeyValue.COMPARATOR, StoreFile.BloomType.ROW,
-        2000);
+    StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, fs,
+        StoreFile.DEFAULT_BLOCKSIZE_SMALL)
+            .withFilePath(f)
+            .withBloomType(StoreFile.BloomType.ROW)
+            .withMaxKeyCount(2000)
+            .build();
     assertFalse(writer.hasGeneralBloom());
     writer.close();
     fs.delete(f, true);
@@ -562,22 +574,25 @@ public class TestStoreFile extends HBase
         Integer.MAX_VALUE);
 
     // TODO: commented out because we run out of java heap space on trunk
-    /*
     // the below config caused IllegalArgumentException in our production cluster
     // however, the resulting byteSize is < MAX_INT, so this should work properly
-    writer = new StoreFile.Writer(fs, f,
-        StoreFile.DEFAULT_BLOCKSIZE_SMALL, HFile.DEFAULT_COMPRESSION_ALGORITHM,
-        conf, KeyValue.COMPARATOR, StoreFile.BloomType.ROW, 272446963);
-    assertTrue(writer.hasBloom());
+    writer = new StoreFile.WriterBuilder(conf, cacheConf, fs,
+        StoreFile.DEFAULT_BLOCKSIZE_SMALL)
+            .withFilePath(f)
+            .withBloomType(StoreFile.BloomType.ROW)
+            .withMaxKeyCount(27244696)
+            .build();
+    assertTrue(writer.hasGeneralBloom());
     bloomWriteRead(writer, fs);
-    */
 
     // this, however, is too large and should not create a bloom
     // because Java can't create a contiguous array > MAX_INT
-    writer = new StoreFile.Writer(fs, f,
-        StoreFile.DEFAULT_BLOCKSIZE_SMALL, HFile.DEFAULT_COMPRESSION_ALGORITHM,
-        null, conf, cacheConf, KeyValue.COMPARATOR, StoreFile.BloomType.ROW,
-        Integer.MAX_VALUE);
+    writer = new StoreFile.WriterBuilder(conf, cacheConf, fs,
+        StoreFile.DEFAULT_BLOCKSIZE_SMALL)
+            .withFilePath(f)
+            .withBloomType(StoreFile.BloomType.ROW)
+            .withMaxKeyCount(Integer.MAX_VALUE)
+            .build();
     assertFalse(writer.hasGeneralBloom());
     writer.close();
     fs.delete(f, true);
@@ -668,8 +683,10 @@ public class TestStoreFile extends HBase
     Path storedir = new Path(new Path(this.testDir, "regionname"),
     "familyname");
     Path dir = new Path(storedir, "1234567890");
-    StoreFile.Writer writer = StoreFile.createWriter(this.fs, dir, 8 * 1024,
-        conf, cacheConf);
+    StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf,
+        this.fs, 8 * 1024)
+            .withOutputDir(dir)
+            .build();
 
     List<KeyValue> kvList = getKeyValueSet(timestamps,numRows,
         family, qualifier);
@@ -838,10 +855,11 @@ public class TestStoreFile extends HBase
       totalSize += kv.getLength() + 1;
     }
     int blockSize = totalSize / numBlocks;
-    StoreFile.Writer writer = new StoreFile.Writer(fs, path, blockSize,
-        HFile.DEFAULT_COMPRESSION_ALGORITHM,
-        null, conf, cacheConf, KeyValue.COMPARATOR, StoreFile.BloomType.NONE,
-        2000);
+    StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, fs,
+        blockSize)
+            .withFilePath(path)
+            .withMaxKeyCount(2000)
+            .build();
     // We'll write N-1 KVs to ensure we don't write an extra block
     kvs.remove(kvs.size()-1);
     for (KeyValue kv : kvs) {
@@ -867,15 +885,12 @@ public class TestStoreFile extends HBase
             dataBlockEncoderAlgo,
             dataBlockEncoderAlgo);
     cacheConf = new CacheConfig(conf);
-    StoreFile.Writer writer = new StoreFile.Writer(fs,
-        path, HFile.DEFAULT_BLOCKSIZE,
-        HFile.DEFAULT_COMPRESSION_ALGORITHM,
-        dataBlockEncoder,
-        conf,
-        cacheConf,
-        KeyValue.COMPARATOR,
-        StoreFile.BloomType.NONE,
-        2000);
+    StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, fs,
+        HFile.DEFAULT_BLOCKSIZE)
+            .withFilePath(path)
+            .withDataBlockEncoder(dataBlockEncoder)
+            .withMaxKeyCount(2000)
+            .build();
     writer.close();
     
     StoreFile storeFile = new StoreFile(fs, writer.getPath(), conf,

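The TestStoreFile hunks exercise the builder's two path modes: withOutputDir(), where the builder derives a unique file name under the directory (presumably matching the old createWriter() behavior), and withFilePath(), which writes to exactly the given path, plus the bloom-filter options. A sketch contrasting the two; the 2000-key count is simply the value these tests use:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.io.hfile.CacheConfig;
    import org.apache.hadoop.hbase.regionserver.StoreFile;

    public class PathModeSketch {
      static void twoModes(Configuration conf, FileSystem fs, Path dir, Path file)
          throws IOException {
        CacheConfig cacheConf = new CacheConfig(conf);

        // Directory mode: a unique file name is generated under dir.
        StoreFile.Writer a = new StoreFile.WriterBuilder(conf, cacheConf, fs,
            StoreFile.DEFAULT_BLOCKSIZE_SMALL)
                .withOutputDir(dir)
                .build();
        a.close();

        // Explicit-path mode, with a row bloom sized for an expected 2000 keys.
        StoreFile.Writer b = new StoreFile.WriterBuilder(conf, cacheConf, fs,
            StoreFile.DEFAULT_BLOCKSIZE_SMALL)
                .withFilePath(file)
                .withBloomType(StoreFile.BloomType.ROW)
                .withMaxKeyCount(2000)
                .build();
        b.close();
      }
    }
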
Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java?rev=1293095&r1=1293094&r2=1293095&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java Fri Feb 24 06:06:53 2012
@@ -197,7 +197,7 @@ public class TestWALReplay {
     HRegion region = HRegion.openHRegion(hri, htd, wal, this.conf);
     Path f =  new Path(basedir, "hfile");
     HFile.Writer writer =
-      HFile.getWriterFactoryNoCache(conf).createWriter(this.fs, f);
+      HFile.getWriterFactoryNoCache(conf).withPath(fs, f).create();
     byte [] family = htd.getFamilies().iterator().next().getName();
     byte [] row = Bytes.toBytes(tableNameStr);
     writer.append(new KeyValue(row, family, family, row));

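Callers that don't need block-cache wiring use getWriterFactoryNoCache(), as in the TestWALReplay hunk; options left unset fall back to defaults. A minimal end-to-end sketch, with the row and family bytes invented purely for illustration:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.io.hfile.HFile;
    import org.apache.hadoop.hbase.util.Bytes;

    public class NoCacheSketch {
      static void writeOne(Configuration conf, FileSystem fs, Path f)
          throws IOException {
        HFile.Writer writer = HFile.getWriterFactoryNoCache(conf)
            .withPath(fs, f)  // block size, compression, comparator default
            .create();
        try {
          byte[] row = Bytes.toBytes("row");  // illustrative values only
          byte[] fam = Bytes.toBytes("f");
          writer.append(new KeyValue(row, fam, fam, row));
        } finally {
          writer.close();
        }
      }
    }
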

