geode-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From ash...@apache.org
Subject [4/7] incubator-geode git commit: GEODE-10: Refactor HdfsStore api to match spec
Date Fri, 17 Jul 2015 06:03:31 GMT
GEODE-10: Refactor HdfsStore api to match spec

* Currently HdfsStore's configuration object is nested, and a user needs to
  create multiple sub-objects to manage the store instance. This is less usable
  and gets confusing at times. The user is also exposed to many internal
  details. Replacing the nested configuration with a flat structure will
  therefore be an improvement.
* Rename members


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/3772869d
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/3772869d
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/3772869d

Branch: refs/heads/develop
Commit: 3772869d02148eec8b5ce97fbf1af9415bccd98c
Parents: e920ae6
Author: Ashvin Agrawal <ashvin@pivotal.io>
Authored: Thu Jun 18 15:30:49 2015 -0700
Committer: Ashvin Agrawal <ashvin@apache.org>
Committed: Mon Jul 13 10:47:40 2015 -0700

----------------------------------------------------------------------
 .../cache/hdfs/HDFSEventQueueAttributes.java    |  72 --
 .../hdfs/HDFSEventQueueAttributesFactory.java   | 160 -----
 .../gemstone/gemfire/cache/hdfs/HDFSStore.java  | 332 +++++++---
 .../gemfire/cache/hdfs/HDFSStoreFactory.java    | 233 +++----
 .../gemfire/cache/hdfs/HDFSStoreMutator.java    | 307 ++++-----
 .../internal/HDFSEventQueueAttributesImpl.java  | 179 -----
 .../hdfs/internal/HDFSIntegrationUtil.java      | 121 ++--
 .../hdfs/internal/HDFSStoreConfigHolder.java    | 656 +++++++++----------
 .../cache/hdfs/internal/HDFSStoreCreation.java  | 189 +++---
 .../hdfs/internal/HDFSStoreFactoryImpl.java     |   2 +
 .../cache/hdfs/internal/HDFSStoreImpl.java      |  87 ++-
 .../hdfs/internal/HDFSStoreMutatorImpl.java     | 279 +++-----
 .../SizeTieredHdfsCompactionConfigHolder.java   |  74 ---
 .../internal/hoplog/HDFSCompactionManager.java  |  27 +-
 .../hoplog/HDFSUnsortedHoplogOrganizer.java     |  10 +-
 .../hoplog/HdfsSortedOplogOrganizer.java        |  22 +-
 .../internal/cache/GemFireCacheImpl.java        |   2 +-
 .../internal/cache/xmlcache/CacheCreation.java  |   2 +-
 .../internal/cache/xmlcache/CacheXmlParser.java | 108 +--
 .../cli/commands/HDFSStoreCommands.java         |  81 +--
 .../cli/functions/AlterHDFSStoreFunction.java   |  24 +-
 .../cli/functions/CreateHDFSStoreFunction.java  |  18 +-
 .../ColocatedRegionWithHDFSDUnitTest.java       |  18 +-
 .../hdfs/internal/HDFSConfigJUnitTest.java      | 146 ++---
 .../internal/HdfsStoreMutatorJUnitTest.java     | 147 ++---
 .../internal/hoplog/BaseHoplogTestCase.java     |   7 +-
 .../hoplog/HDFSCacheLoaderJUnitTest.java        |   4 +-
 .../hoplog/HDFSCompactionManagerJUnitTest.java  |   8 +-
 .../HDFSUnsortedHoplogOrganizerJUnitTest.java   |   2 +-
 .../HdfsSortedOplogOrganizerJUnitTest.java      |  10 +-
 .../hoplog/TieredCompactionJUnitTest.java       |  25 +-
 .../hoplog/mapreduce/HoplogUtilJUnitTest.java   |   7 +-
 .../cache/HDFSRegionOperationsJUnitTest.java    |   5 +-
 .../commands/HDFSStoreCommandsJUnitTest.java    | 124 ++--
 .../AlterHDFSStoreFunctionJUnitTest.java        | 146 +----
 .../CreateHDFSStoreFunctionJUnitTest.java       | 156 +----
 .../DescribeHDFSStoreFunctionJUnitTest.java     |  98 +--
 37 files changed, 1434 insertions(+), 2454 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/3772869d/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSEventQueueAttributes.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSEventQueueAttributes.java b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSEventQueueAttributes.java
deleted file mode 100644
index ef7e863..0000000
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSEventQueueAttributes.java
+++ /dev/null
@@ -1,72 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2002-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *========================================================================
- */
-
-package com.gemstone.gemfire.cache.hdfs;
-
-/**
- * {@link HDFSEventQueueAttributes} represents the attributes of the buffer where events are 
- * accumulated before they are persisted to HDFS  
- * 
- * @author Hemant Bhanawat
- */
-public interface HDFSEventQueueAttributes {
-
-  /**
-   * The Disk store that is required for overflow and persistence
-   * @return    String
-   */
-  public String getDiskStoreName();
-
-  /**
-   * The maximum memory after which the data needs to be overflowed to disk
-   * @return    int
-   */
-  public int getMaximumQueueMemory();
-
-  /**
-   * Represents the size of a batch per bucket that gets delivered
-   * from the HDFS Event Queue to HDFS. A higher value means that 
-   * less number of bigger batches are persisted to HDFS and hence 
-   * big files are created on HDFS. But, bigger batches consume memory.  
-   *  
-   * This value is an indication. Batches per bucket with size less than the specified
-   * are sent to HDFS if interval specified by {@link #getBatchTimeInterval()}
-   * has elapsed.
-   * @return    int
-   */
-  public int getBatchSizeMB();
-  
-  /**
-   * Represents the batch time interval for a HDFS queue. This is the maximum time interval
-   * that can elapse before a batch of data from a bucket is sent to HDFS.
-   *
-   * @return  int
-   */
-  public int getBatchTimeInterval();
-  
-  /**
-   * Represents whether the  HDFS Event Queue is configured to be persistent or non-persistent
-   * @return    boolean
-   */
-  public boolean isPersistent();
-
-  /**
-   * Represents whether or not the writing to the disk is synchronous.
-   * 
-   * @return boolean 
-   */
-  public boolean isDiskSynchronous();
-  
-  /**
-   * Number of threads in VM consuming the events.
-   * default is one.
-   * 
-   * @return int
-   */
-  public int getDispatcherThreads();
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/3772869d/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSEventQueueAttributesFactory.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSEventQueueAttributesFactory.java b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSEventQueueAttributesFactory.java
deleted file mode 100644
index fc09b7a..0000000
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSEventQueueAttributesFactory.java
+++ /dev/null
@@ -1,160 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2002-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *========================================================================
- */
-
-package com.gemstone.gemfire.cache.hdfs;
-
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSEventQueueAttributesImpl;
-import com.gemstone.gemfire.cache.wan.GatewaySender;
-import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
-
-/**
- * Factory to create {@link HDFSEventQueueAttributes} . 
- * {@link HDFSEventQueueAttributes} represents the attributes of the buffer where events are 
- * accumulated before they are persisted to HDFS  
- * 
- * @author Hemant Bhanawat
- */
-public class HDFSEventQueueAttributesFactory {
-
-  /**
-   * The default batch size
-   */
-  public static final int DEFAULT_BATCH_SIZE_MB = 32;
-
-  /**
-   * The default batch time interval in milliseconds
-   */
-  public static final int DEFAULT_BATCH_TIME_INTERVAL_MILLIS = 60000;
-
-  /**
-   * By default, queue is created for a read write HDFS store 
-   */
-  public static final boolean DEFAULT_FOR_WRITEONLY_HDFSSTORE = false;
-  
-  public HDFSEventQueueAttributesFactory() {
-  }
-
-  /**
-   * Copy constructor for {@link HDFSEventQueueAttributes}. The method creates
-   * an instance with same attribute values as of {@code attr}
-   * 
-   * @param attr
-   */
-  public HDFSEventQueueAttributesFactory(HDFSEventQueueAttributes attr) {
-    setDiskStoreName(attr.getDiskStoreName());
-    setMaximumQueueMemory(attr.getMaximumQueueMemory());
-    setBatchTimeInterval(attr.getBatchTimeInterval());
-    setBatchSizeMB(attr.getBatchSizeMB());
-    setPersistent(attr.isPersistent());
-    setDiskSynchronous(attr.isDiskSynchronous());
-    setDispatcherThreads(attr.getDispatcherThreads());
-  }
-  
-  /**
-   * Sets the disk store name for overflow or persistence.
-   * 
-   * @param name
-   */
-  public HDFSEventQueueAttributesFactory setDiskStoreName(String name) {
-    this.diskStoreName = name;
-    return this;
-  }
-  
-  /**
-   * Sets the maximum amount of memory (in MB) for an
-   * HDFS Event Queue.
-   * 
-   * @param memory
-   *          The maximum amount of memory (in MB) for an
-   *          HDFS Event Queue
-   */
-  public HDFSEventQueueAttributesFactory setMaximumQueueMemory(int memory) {
-    this.maximumQueueMemory = memory;
-    return this;
-  }
-  
-  /**
-   * Sets the batch time interval for a HDFS queue. This is the maximum time interval
-   * that can elapse before a batch of data from a bucket is sent to HDFS.
-   *
-   * @param intervalMillis
-   *          int time interval in milliseconds. Default is 60000 ms.
-   */
-  public HDFSEventQueueAttributesFactory setBatchTimeInterval(int intervalMillis){
-    this.batchIntervalMillis = intervalMillis;
-    return this;
-  }
-
- 
-  /**
-   * Sets the size of a batch per bucket that gets delivered
-   * from the HDFS Event Queue to HDFS. Setting this to a higher value
-   * would mean that less number of bigger batches are persisted to
-   * HDFS and hence big files are created on HDFS. But, bigger batches
-   * would consume memory.  
-   *  
-   * This value is an indication. Batches per bucket with size less than the specified
-   * are sent to HDFS if interval specified by {@link #setBatchTimeInterval(int)}
-   * has elapsed.
-   *  
-   * @param size
-   *          The size of batches sent to HDFS in MB. Default is 32 MB.
-   */
-  public HDFSEventQueueAttributesFactory setBatchSizeMB(int size){
-    this.batchSize = size;
-    return this;
-  }
-  
-  /**
-   * Sets whether the HDFS Event Queue is persistent or not.
-   * 
-   * @param isPersistent
-   *          Whether to enable persistence for an HDFS Event Queue..
-   */
-  public HDFSEventQueueAttributesFactory setPersistent(boolean isPersistent) {
-    this.isPersistenceEnabled = isPersistent;
-    return this;
-  }
-  /**
-   * Sets whether or not the writing to the disk is synchronous.
-   *
-   * @param isSynchronous
-   *          boolean if true indicates synchronous writes
-   */
-  public HDFSEventQueueAttributesFactory setDiskSynchronous(boolean isSynchronous) {
-    this.diskSynchronous = isSynchronous;
-    return this;
-  }
-  
-  /**
-   * Number of threads in VM to consumer the events
-   * default is one.
-   * 
-   * @param dispatcherThreads
-   */
-  public void setDispatcherThreads(int dispatcherThreads) {
-  	this.dispatcherThreads = dispatcherThreads;
-  }
-  
-  /**
-   * Creates the <code>HDFSEventQueueAttributes</code>.    * 
-   * 
-   */
-  public HDFSEventQueueAttributes create() {
-    return new HDFSEventQueueAttributesImpl(this.diskStoreName, this.maximumQueueMemory, 
-        this.batchSize, this.isPersistenceEnabled,  this.batchIntervalMillis, this.diskSynchronous, this.dispatcherThreads);
-  }
-
-  private int maximumQueueMemory = GatewaySender.DEFAULT_MAXIMUM_QUEUE_MEMORY;
-  private int batchIntervalMillis = HDFSEventQueueAttributesFactory.DEFAULT_BATCH_TIME_INTERVAL_MILLIS;
-  private int batchSize = HDFSEventQueueAttributesFactory.DEFAULT_BATCH_SIZE_MB;
-  private boolean diskSynchronous = GatewaySender.DEFAULT_DISK_SYNCHRONOUS; 
-  private boolean isPersistenceEnabled = GatewaySender.DEFAULT_PERSISTENCE_ENABLED;
-  private int dispatcherThreads = GatewaySender.DEFAULT_HDFS_DISPATCHER_THREADS;
-  private String diskStoreName = null;
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/3772869d/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSStore.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSStore.java b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSStore.java
index f5bd943..c9b399d 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSStore.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSStore.java
@@ -8,18 +8,22 @@
 
 package com.gemstone.gemfire.cache.hdfs;
 
+import com.gemstone.gemfire.cache.wan.GatewaySender;
+
 /**
- * Provides HDFS storage for one or more regions. The regions in the same HDFS
- * store will share the same persistence attributes.
+ * HDFS stores provide a means of persisting data on HDFS. There can be multiple
+ * instance of HDFS stores in a cluster. The regions connected using a HDFS
+ * store will share the same HDFS persistence attributes. A user will normally
+ * perform the following steps to enable HDFS persistence for a region:
+ * <ol>
+ * <li>[Optional] Creates a Disk store for reliability
+ * <li>HDFS buffers will use local persistence till it is persisted on HDFS
+ * <li>Creates a HDFS Store
+ * <li>Creates a Region connected to HDFS Store
+ * <li>Uses region API to create and query data
+ * </ol>
  * <p>
  * Instances of this interface are created using {@link HDFSStoreFactory#create}
- * So to create a <code>HDFSStore</code> named <code>myDiskStore</code> do
- * this:
- * 
- * <PRE>
- * new HDFSStoreFactory().create(&quot;myDiskStore&quot;);
- * </PRE>
- * <p>
  * 
  * @author Hemant Bhanawat
  * @author Ashvin Agrawal
@@ -29,153 +33,265 @@ public interface HDFSStore {
   public static final String DEFAULT_HOME_DIR = "gemfire";
   public static final float DEFAULT_BLOCK_CACHE_SIZE = 10f;
   public static final int DEFAULT_MAX_WRITE_ONLY_FILE_SIZE = 256; 
-  public static final int DEFAULT_WRITE_ONLY_FILE_ROLLOVER_INTERVAL = 3600; 
+  public static final int DEFAULT_WRITE_ONLY_FILE_ROLLOVER_INTERVAL = 3600;
+  
+  public static final int DEFAULT_BATCH_SIZE_MB = 32;
+  public static final int DEFAULT_BATCH_INTERVAL_MILLIS = 60000;
+  public static final boolean DEFAULT_WRITEONLY_HDFSSTORE = false;
+  public static final boolean DEFAULT_BUFFER_PERSISTANCE = GatewaySender.DEFAULT_PERSISTENCE_ENABLED;
+  public static final boolean DEFAULT_DISK_SYNCHRONOUS = GatewaySender.DEFAULT_DISK_SYNCHRONOUS;
+  public static final int DEFAULT_MAX_BUFFER_MEMORY = GatewaySender.DEFAULT_MAXIMUM_QUEUE_MEMORY;
+  public static final int DEFAULT_DISPATCHER_THREADS = GatewaySender.DEFAULT_HDFS_DISPATCHER_THREADS;
+  
+  public static final boolean DEFAULT_MINOR_COMPACTION = true;
+  public static final int DEFAULT_MINOR_COMPACTION_THREADS = 10;
+  public static final boolean DEFAULT_MAJOR_COMPACTION = true;
+  public static final int DEFAULT_MAJOR_COMPACTION_THREADS = 2;
+  public static final int DEFAULT_MAX_INPUT_FILE_SIZE_MB = 512;
+  public static final int DEFAULT_MAX_INPUT_FILE_COUNT = 10;
+  public static final int DEFAULT_MIN_INPUT_FILE_COUNT = 4;
+  
+  public static final int DEFAULT_MAJOR_COMPACTION_INTERVAL_MINS = 720;
+  public static final int DEFAULT_OLD_FILE_CLEANUP_INTERVAL_MINS = 30;
 
   /**
-   * @return name of HDFSStore provided at while creating the instance
+   * @return A unique identifier for the HDFSStore
    */
   public String getName();
 
   /**
-   * @return Namenode URL associated with this store
+   * HDFSStore persists data on a HDFS cluster identified by cluster's NameNode
+   * URL or NameNode Service URL. NameNode URL can also be provided via
+   * hdfs-site.xml (see HDFSClientConfigFile). If the NameNode url is missing
+   * HDFSStore creation will fail. HDFS client can also load hdfs configuration
+   * files in the classpath. NameNode URL provided in this way is also fine.
+   * 
+   * @return Namenode url explicitly configured by user
    */
   public String getNameNodeURL();
 
   /**
-   * @return Home directory where regions using this store will be persisted
+   * HomeDir is the HDFS directory path in which HDFSStore stores files. The
+   * value must not contain the NameNode URL. The owner of this node's JVM
+   * process must have read and write access to this directory. The path could
+   * be absolute or relative. If a relative path for HomeDir is provided, then
+   * the HomeDir is created relative to /user/JVM_owner_name or, if specified,
+   * relative to directory specified by the hdfs-root-dir property. As a best
+   * practice, HDFS store directories should be created relative to a single
+   * HDFS root directory. As an alternative, an absolute path beginning with the
+   * "/" character to override the default root location can be provided.
+   * 
+   * @return path
    */
   public String getHomeDir();
 
   /**
-   * @return hdfs client configuration referred by this store
+   * The full path to the HDFS client configuration file, for e.g. hdfs-site.xml
+   * or core-site.xml. This file must be accessible to any node where an
+   * instance of this HDFSStore will be created. If each node has a local copy
+   * of this configuration file, it is important for all the copies to be
+   * "identical". Alternatively, by default HDFS client can also load some HDFS
+   * configuration files if added in the classpath.
+   * 
+   * @return path
    */
   public String getHDFSClientConfigFile();
 
   /**
+   * The maximum amount of memory in megabytes to be used by HDFSStore.
+   * HDFSStore buffers data in memory to optimize HDFS IO operations. Once the
+   * configured memory is utilized, data may overflow to disk.
+   * 
+   * @return max memory in MB
+   */
+  public int getMaxMemory();
+
+  /**
    * @return the percentage of the heap to use for the block cache in the range
    * 0 ... 100
    */
   public float getBlockCacheSize();
 
   /**
-   * For write only tables, data is written to a single file until the file 
-   * reaches a size specified by this API or the time 
-   * for file rollover specified by {@link #getFileRolloverInterval()} has passed.
-   * Default is 256 MB.  
-   *   
-   * @return max file size in MB. 
+   * HDFSStore buffer data is persisted on HDFS in batches. The BatchSize
+   * defines the maximum size (in megabytes) of each batch that is written to
+   * HDFS. This parameter, along with BatchInterval determines the rate at which
+   * data is persisted on HDFS. A higher value means that less number of bigger
+   * batches are persisted to HDFS and hence big files are created on HDFS. But,
+   * bigger batches consume memory.
+   * 
+   * @return batchsize in MB
    */
-  public int getMaxFileSize();
+  public int getBatchSize();
   
   /**
-   * For write only tables, data is written to a single file until the file 
-   * reaches a certain size specified by {@link #getMaxFileSize()} or the time 
-   * for file rollover has passed. Default is 3600 seconds. 
-   *   
-   * @return time in seconds after which a file will be rolled over into a new file
+   * HDFSStore buffer data is persisted on HDFS in batches, and the
+   * BatchInterval defines the maximum time that can elapse between writing
+   * batches to HDFS. This parameter, along with BatchSize determines the rate
+   * at which data is persisted on HDFS.
+   * 
+   * @return interval in seconds
    */
-  public int getFileRolloverInterval();
+  public int getBatchInterval();
   
   /**
-   * @return true if auto compaction is enabled
+   * The maximum number of threads (per region) used to write batches to HDFS.
+   * If you have a large number of clients that add or update data in a region,
+   * then you may need to increase the number of dispatcher threads to avoid
+   * bottlenecks when writing data to HDFS.
+   * 
+   * @return The maximum number of threads
    */
-  public boolean getMinorCompaction();
+  public int getDispatcherThreads();
+  
+  /**
+   * Configure if HDFSStore in-memory buffer data, that has not been persisted
+   * on HDFS yet, should be persisted to a local disk to prevent buffer data
+   * loss. Persisting data may impact write performance. If performance is
+   * critical and buffer data loss is acceptable, disable persistence.
+   * 
+   * @return true if buffer is persisted locally
+   */
+  public boolean getBufferPersistent();
+
+  /**
+   * The named DiskStore to use for any local disk persistence needs of
+   * HDFSStore, for e.g. store's buffer persistence and buffer overflow. If you
+   * specify a value, the named DiskStore must exist. If you specify a null
+   * value or you omit this option, default DiskStore is used.
+   * 
+   * @return disk store name
+   */
+  public String getDiskStoreName();
 
   /**
-   * Return the HDFSEventQueueAttributes associated with this HDFSStore
+   * Synchronous flag indicates if synchronous disk writes are enabled or not.
+   * 
+   * @return true if enabled
    */
-  public HDFSEventQueueAttributes getHDFSEventQueueAttributes();
+  public boolean getSynchronousDiskWrite();
   
   /**
-   * Destroys this hdfs store. Removes the disk store from the cache. All
-   * regions on this store must be closed.
+   * For HDFS write-only regions, this defines the maximum size (in megabytes)
+   * that an HDFS log file can reach before HDFSStore closes the file and begins
+   * writing to a new file. This clause is ignored for HDFS read/write regions.
+   * Keep in mind that the files are not available for MapReduce processing
+   * until the file is closed; you can also set WriteOnlyFileRolloverInterval to
+   * specify the maximum amount of time an HDFS log file remains open.
    * 
+   * @return max file size in MB.
    */
-  public void destroy();
+  public int getMaxWriteOnlyFileSize();
+  
+  /**
+   * For HDFS write-only regions, this defines the maximum time that can elapse
+   * before HDFSStore closes an HDFS file and begins writing to a new file. This
+   * configuration is ignored for HDFS read/write regions.
+   * 
+   * @return interval in seconds 
+   */
+  public int getWriteOnlyFileRolloverInterval();
+  
+  /**
+   * Minor compaction reorganizes data in files to optimize read performance and
+   * reduce number of files created on HDFS. Minor compaction process can be
+   * I/O-intensive, tune the performance of minor compaction using
+   * MinorCompactionThreads.
+   * 
+   * @return true if auto minor compaction is enabled
+   */
+  public boolean getMinorCompaction();
 
   /**
-   * @return Instance of compaction configuration associated with this store
+   * The maximum number of threads that HDFSStore uses to perform minor
+   * compaction. You can increase the number of threads used for compaction as
+   * necessary in order to fully utilize the performance of your HDFS cluster.
+   * 
+   * @return maximum number of threads executing minor compaction
    */
-  public HDFSCompactionConfig getHDFSCompactionConfig();
+  public int getMinorCompactionThreads();
+
+  /**
+   * Major compaction removes old values of a key and deleted records from the
+   * HDFS files, which can save space in HDFS and improve performance when
+   * reading from HDFS. As major compaction process can be long-running and
+   * I/O-intensive, tune the performance of major compaction using
+   * MajorCompactionInterval and MajorCompactionThreads.
+   * 
+   * @return true if auto major compaction is enabled
+   */
+  public boolean getMajorCompaction();
+
+  /**
+   * The amount of time after which HDFSStore performs the next major compaction
+   * cycle.
+   * 
+   * @return interval in seconds
+   */
+  public int getMajorCompactionInterval();
+
+  /**
+   * The maximum number of threads that HDFSStore uses to perform major
+   * compaction. You can increase the number of threads used for compaction as
+   * necessary in order to fully utilize the performance of your HDFS cluster.
+   * 
+   * @return maximum number of threads executing major compaction
+   */
+  public int getMajorCompactionThreads();
   
   /**
-   * @return instance of mutator object that can be used to alter properties of
-   *         this store
+   * HDFSStore creates new files as part of periodic maintenance activity.
+   * Existing files are deleted asynchronously. PurgeInterval defines the amount
+   * of time old files remain available and could be read externally, e.g. by MR
+   * jobs. After this interval has passed, old files are deleted.
+   * 
+   * @return interval configuration that guides deletion of old files
+   */
+  public int getPurgeInterval();
+  
+  /**
+   * Permanently deletes all HDFS files associated with this
+   * {@link HDFSStore}. This operation will fail ( {@link IllegalStateException}
+   * ) if any region is still using this store for persistence.
+   */
+  public void destroy();
+  
+  /**
+   * @return new instance of mutator object that can be used to alter properties
+   *         of this store
    */
   public HDFSStoreMutator createHdfsStoreMutator();
   
   /**
-   * Applies new attribute values provided using mutator to this instance
-   * dynmically.
+   * Identifies attributes configured in {@link HDFSStoreMutator} and applies
+   * the new attribute values to this instance of {@link HDFSStore} dynamically.
+   * Any property which is not set in {@link HDFSStoreMutator} remains
+   * unaltered. In most cases altering the attributes does not cause existing
+   * operations to terminate. The altered attributes are used in the next cycle
+   * of the operation they impact.
    * 
-   * @param mutator
-   *          contains the changes
-   * @return hdfsStore reference representing the old store configuration
+   * @return hdfsStore reference representing the old {@link HDFSStore}
    */
   public HDFSStore alter(HDFSStoreMutator mutator);
-      
-  public static interface HDFSCompactionConfig {
-    public static final String INVALID = "invalid";
-    public static final String SIZE_ORIENTED = "size-oriented";
-    public static final String DEFAULT_STRATEGY = SIZE_ORIENTED;
-    
-    public static final boolean DEFAULT_AUTO_COMPACTION = true;
-    public static final boolean DEFAULT_AUTO_MAJOR_COMPACTION = true;
-    public static final int DEFAULT_MAX_INPUT_FILE_SIZE_MB = 512;
-    public static final int DEFAULT_MAX_INPUT_FILE_COUNT = 10;
-    public static final int DEFAULT_MIN_INPUT_FILE_COUNT = 4;
-    public static final int DEFAULT_MAX_THREADS = 10;
-    
-    public static final int DEFAULT_MAJOR_COMPACTION_MAX_THREADS = 2;
-    public static final int DEFAULT_MAJOR_COMPACTION_INTERVAL_MINS = 720;
-    public static final int DEFAULT_OLD_FILE_CLEANUP_INTERVAL_MINS = 30;
-    
-    /**
-     * @return name of the compaction strategy configured for this store
-     */
-    public String getCompactionStrategy();
-
-    /**
-     * @return size threshold (in MB). A file larger than this size will not be
-     *         considered for compaction
-     */
-    public int getMaxInputFileSizeMB();
-
-    /**
-     * @return minimum count threshold. Compaction cycle will commence if the
-     *         number of files to be compacted is more than this number
-     */
-    public int getMinInputFileCount();
-
-    /**
-     * @return maximum count threshold.  Compaction cycle will not include more
-     *          files than the maximum
-     */
-    public int getMaxInputFileCount();
-
-    /**
-     * @return maximum number of threads executing minor compaction
-     */
-    public int getMaxThreads();
-
-    /**
-     * @return true if auto major compaction is enabled
-     */
-    public boolean getAutoMajorCompaction();
-
-    /**
-     * @return interval configuration that guides major compaction frequency
-     */
-    public int getMajorCompactionIntervalMins();
-
-    /**
-     * @return maximum number of threads executing major compaction
-     */
-    public int getMajorCompactionMaxThreads();
-    
-    /**
-     * @return interval configuration that guides deletion of old files
-     */
-    public int getOldFilesCleanupIntervalMins();
-  }
+
+  /**
+   * This advanced configuration affects minor compaction.
+   * @return size threshold (in MB). A file larger than this size will not be
+   *         considered for compaction
+   */
+  public int getMaxInputFileSizeMB();
+
+  /**
+   * This advanced configuration affects minor compaction.
+   * @return minimum count threshold. Compaction cycle will commence if the
+   *         number of files to be compacted is more than this number
+   */
+  public int getMinInputFileCount();
+
+  /**
+   * This advanced configuration affects minor compaction.
+   * @return maximum count threshold.  Compaction cycle will not include more
+   *          files than the maximum
+   */
+  public int getMaxInputFileCount();
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/3772869d/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSStoreFactory.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSStoreFactory.java b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSStoreFactory.java
index 516d2aa..949ff40 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSStoreFactory.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSStoreFactory.java
@@ -10,14 +10,16 @@ package com.gemstone.gemfire.cache.hdfs;
 
 import com.gemstone.gemfire.GemFireConfigException;
 import com.gemstone.gemfire.cache.Cache;
-import com.gemstone.gemfire.cache.hdfs.HDFSStore.HDFSCompactionConfig;
 
 /**
  * Factory for creating instances of {@link HDFSStore}. To get an instance of
  * this factory call {@link Cache#createHDFSStoreFactory}.
  * <P>
- * To use this factory configure it with the <code>set</code> methods and then
- * call {@link #create} to produce a HDFS store instance.
+ * Usage
+ * <ol>
+ * <li> configure factory using <code>set</code> methods
+ * <li> call {@link #create} to produce a HDFSStore instance.
+ * </ol>
  * 
  * @author Hemant Bhanawat
  * @author Ashvin Agrawal
@@ -25,171 +27,132 @@ import com.gemstone.gemfire.cache.hdfs.HDFSStore.HDFSCompactionConfig;
 public interface HDFSStoreFactory {
 
   /**
-   * @param name
-   *          name of HDFSStore provided at while creating the instance
+   * @see HDFSStore#getName()
    */
   public HDFSStoreFactory setName(String name);
 
   /**
-   * @param url
-   *          Namenode URL associated with this store
+   * @see HDFSStore#getNameNodeURL()
    */
   public HDFSStoreFactory setNameNodeURL(String url);
 
   /**
-   * @param dir
-   *          Home directory where regions using this store will be persisted
+   * @see HDFSStore#getHomeDir()
    */
   public HDFSStoreFactory setHomeDir(String dir);
 
   /**
-   * @param file
-   *          hdfs client configuration referred by this store
+   * @see HDFSStore#getHDFSClientConfigFile()
    */
-  public HDFSStoreFactory setHDFSClientConfigFile(String file);
+  public HDFSStoreFactory setHDFSClientConfigFile(String filePath);
 
   /**
-   * @param config
-   *          Instance of compaction configuration associated with this store
+   * @see HDFSStore#getBlockCacheSize()
    */
-  public HDFSStoreFactory setHDFSCompactionConfig(HDFSCompactionConfig config);
-  
+  public HDFSStoreFactory setBlockCacheSize(float percentage);
+
   /**
-   * @param percentage
-   *          Size of the block cache as a percentage of the heap in the range
-   *          0 ... 100 
+   * @see HDFSStore#getMaxWriteOnlyFileSize()
    */
-  public HDFSStoreFactory setBlockCacheSize(float percentage);
-  
+  public HDFSStoreFactory setMaxWriteOnlyFileSize(int maxFileSize);
+
   /**
-   * Sets the HDFS event queue attributes
-   * This causes the store to use the {@link HDFSEventQueueAttributes}.
-   * @param hdfsEventQueueAttrs the attributes of the HDFS Event queue
-   * @return a reference to this RegionFactory object
-   * 
+   * @see HDFSStore#getWriteOnlyFileRolloverInterval()
    */
-  public HDFSStoreFactory setHDFSEventQueueAttributes(HDFSEventQueueAttributes hdfsEventQueueAttrs);
-  
+  public HDFSStoreFactory setWriteOnlyFileRolloverInterval(int interval);
+
   /**
-   * For write only tables, data is written to a single file until the file 
-   * reaches a size specified by this API or the time 
-   * for file rollover specified by {@link #setFileRolloverInterval(int)} has passed.  
-   * Default is 256 MB. 
-   * 
-   * @param maxFileSize max file size in MB
+   * @see HDFSStore#getMinorCompaction()
    */
-  public HDFSStoreFactory setMaxFileSize(int maxFileSize);
-  
+  public HDFSStoreFactory setMinorCompaction(boolean auto);
+
   /**
-   * For write only tables, data is written to a single file until the file 
-   * reaches a certain size specified by {@link #setMaxFileSize(int)} or the time 
-   * for file rollover has passed. Default is 3600 seconds. 
-   * 
-   * @param rolloverIntervalInSecs time in seconds after which a file will be rolled over into a new file
+   * @see HDFSStore#getMinorCompactionThreads()
    */
-  public HDFSStoreFactory setFileRolloverInterval(int rolloverIntervalInSecs);
-  
+  public HDFSStoreFactory setMinorCompactionThreads(int count);
+
   /**
-   * @param auto
-   *          true if auto compaction is enabled
+   * @see HDFSStore#getMajorCompaction()
    */
-  public HDFSStoreFactory setMinorCompaction(boolean auto);
+  public HDFSStoreFactory setMajorCompaction(boolean auto);
 
   /**
-   * @param strategy
-   *          name of the compaction strategy or null for letting system choose
-   *          and apply default compaction strategy
-   * @return instance of {@link HDFSCompactionConfigFactory}
-   */
-  public HDFSCompactionConfigFactory createCompactionConfigFactory(String strategy);
-
-  public static interface HDFSCompactionConfigFactory {
-
-    /**
-     * @param size
-     *          size threshold (in MB). A file larger than this size will not be
-     *          considered for compaction
-     */
-    public HDFSCompactionConfigFactory setMaxInputFileSizeMB(int size);
-
-    /**
-     * @param count
-     *          minimum count threshold. Compaction cycle will commence if the
-     *          number of files to be compacted is more than this number
-     */
-    public HDFSCompactionConfigFactory setMinInputFileCount(int count);
-
-    /**
-     * @param count
-     *          maximum count threshold.  Compaction cycle will not include more
-     *          files than the maximum
-     */
-    public HDFSCompactionConfigFactory setMaxInputFileCount(int count);
-
-    /**
-     * @param count
-     *          maximum number of threads executing minor compaction. Count must
-     *          be greater than 0
-     */
-    public HDFSCompactionConfigFactory setMaxThreads(int count);
-
-    /**
-     * @param auto
-     *          true if auto major compaction is enabled
-     */
-    public HDFSCompactionConfigFactory setAutoMajorCompaction(boolean auto);
-
-    /**
-     * @param interval
-     *          interval configuration that guides major compaction frequency
-     */
-    public HDFSCompactionConfigFactory setMajorCompactionIntervalMins(int interval);
-
-    /**
-     * @param count
-     *          maximum number of threads executing major compaction. Count must
-     *          be greater than 0
-     */
-    public HDFSCompactionConfigFactory setMajorCompactionMaxThreads(int count);
-    
-    /**
-     * @param interval
-     *          interval configuration that guides deletion of old files
-     */
-    public HDFSCompactionConfigFactory setOldFilesCleanupIntervalMins(int interval);
-    
-    /**
-     * Create a {@link HDFSCompactionConfig}. The returned instance will have
-     * the same configuration as that this factory.
-     * 
-     * @return the newly created {@link HDFSCompactionConfig}
-     * @throws GemFireConfigException
-     *           if the cache xml is invalid
-     */
-    public HDFSCompactionConfig create() throws GemFireConfigException;
-    
-    /**
-     * @return A {@link HDFSCompactionConfig} view of this factory
-     * @throws GemFireConfigException
-     */
-    public HDFSCompactionConfig getConfigView();
-  }
-
-  /**
-   * Create a new HDFS store. The returned HDFS store's configuration will be
-   * the same as this factory's configuration.
+   * @see HDFSStore#getMajorCompactionInterval()
+   */
+  public HDFSStoreFactory setMajorCompactionInterval(int interval);
+
+  /**
+   * @see HDFSStore#getMajorCompactionThreads()
+   */
+  public HDFSStoreFactory setMajorCompactionThreads(int count);
+
+  /**
+   * @see HDFSStore#getMaxInputFileSizeMB()
+   */
+  public HDFSStoreFactory setMaxInputFileSizeMB(int size);
+
+  /**
+   * @see HDFSStore#getMinInputFileCount()
+   */
+  public HDFSStoreFactory setMinInputFileCount(int count);
+
+  /**
+   * @see HDFSStore#getMaxInputFileCount()
+   */
+  public HDFSStoreFactory setMaxInputFileCount(int count);
+
+  /**
+   * @see HDFSStore#getPurgeInterval()
+   */
+  public HDFSStoreFactory setPurgeInterval(int interval);
+
+  /**
+   * @see HDFSStore#getDiskStoreName()
+   */
+  public HDFSStoreFactory setDiskStoreName(String name);
+
+  /**
+   * @see HDFSStore#getMaxMemory()
+   */
+  public HDFSStoreFactory setMaxMemory(int memory);
+
+  /**
+   * @see HDFSStore#getBatchInterval()
+   */
+  public HDFSStoreFactory setBatchInterval(int interval);
+
+  /**
+   * @see HDFSStore#getBatchSize()
+   */
+  public HDFSStoreFactory setBatchSize(int size);
+
+  /**
+   * @see HDFSStore#getBufferPersistent()
+   */
+  public HDFSStoreFactory setBufferPersistent(boolean isPersistent);
+
+  /**
+   * @see HDFSStore#getSynchronousDiskWrite()
+   */
+  public HDFSStoreFactory setSynchronousDiskWrite(boolean isSynchronous);
+
+  /**
+   * @see HDFSStore#getDispatcherThreads()
+   */
+  public HDFSStoreFactory setDispatcherThreads(int dispatcherThreads);
+
+  /**
+   * Validates all attribute values and assigns defaults where applicable.
+   * Creates a new instance of {@link HDFSStore} based on the current attribute
+   * values configured in this factory.
    * 
    * @param name
    *          the name of the HDFSStore
    * @return the newly created HDFSStore.
    * @throws GemFireConfigException
-   *           if the cache xml is invalid
+   *           if the configuration is invalid
    * @throws StoreExistsException
-   *           if another instance of {@link HDFSStore} with the same exists
+   *           if a {@link HDFSStore} with the same name exists
    */
-  public HDFSStore create(String name) throws GemFireConfigException,
-      StoreExistsException;
-
-  // TODO this is the only non-factory instance getter in this class
-  HDFSEventQueueAttributes getHDFSEventQueueAttributes();
+  public HDFSStore create(String name) throws GemFireConfigException, StoreExistsException;
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/3772869d/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSStoreMutator.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSStoreMutator.java b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSStoreMutator.java
index 47b1708..7b0229c 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSStoreMutator.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSStoreMutator.java
@@ -8,192 +8,173 @@
 
 package com.gemstone.gemfire.cache.hdfs;
 
-import com.gemstone.gemfire.cache.hdfs.HDFSStore.HDFSCompactionConfig;
-import com.gemstone.gemfire.cache.hdfs.HDFSStoreFactory.HDFSCompactionConfigFactory;
-
 public interface HDFSStoreMutator {
   /**
-   * {@link HDFSStoreFactory#setMaxFileSize(int)}
+   * {@link HDFSStoreFactory#setMaxWriteOnlyFileSize(int)}
    */
-  public HDFSStoreMutator setMaxFileSize(int maxFileSize);
+  public HDFSStoreMutator setMaxWriteOnlyFileSize(int maxFileSize);
 
   /**
-   * {@link HDFSStore#getMaxFileSize()}
+   * {@link HDFSStore#getMaxWriteOnlyFileSize()}
    * 
    * @return value to be used when mutator is executed on hdfsStore. -1 if not
    *         set
    */
-  public int getMaxFileSize();
-  
+  public int getMaxWriteOnlyFileSize();
+
   /**
-   * {@link HDFSStoreFactory#setFileRolloverInterval(int)}
+   * {@link HDFSStoreFactory#setWriteOnlyFileRolloverInterval(int)}
    */
-  public HDFSStoreMutator setFileRolloverInterval(int rolloverIntervalInSecs);
-  
+  public HDFSStoreMutator setWriteOnlyFileRolloverInterval(int interval);
+
   /**
-   * {@link HDFSStore#getFileRolloverInterval()}
+   * {@link HDFSStore#getWriteOnlyFileRolloverInterval()}
    * 
    * @return value to be used when mutator is executed on hdfsStore. -1 if not
    *         set
    */
-  public int getFileRolloverInterval();
-  
+  public int getWriteOnlyFileRolloverInterval();
+
   /**
    * {@link HDFSStore#getMinorCompaction()}
    * 
-   * @return value to be used when mutator is executed on hdfsStore. null if
-   *         not set
+   * @return value to be used when mutator is executed on hdfsStore. null if not
+   *         set
    */
   public Boolean getMinorCompaction();
 
   /**
    * {@link HDFSStoreFactory#setMinorCompaction(boolean)}
    */
-  public HDFSCompactionConfigMutator setMinorCompaction(boolean auto);
-  
-  /**
-   * Reuturns mutator for compaction configuration of hdfs store
-   * @return instance of {@link HDFSCompactionConfigMutator}
-   */
-  public HDFSCompactionConfigMutator getCompactionConfigMutator();
-
-  /**
-   * Reuturns mutator for hdfs event queue of hdfs store
-   * @return instance of {@link HDFSEventQueueAttributesMutator}
-   */
-  public HDFSEventQueueAttributesMutator getHDFSEventQueueAttributesMutator();
-  
-  public static interface HDFSEventQueueAttributesMutator {
-    /**
-     * {@link HDFSEventQueueAttributesFactory#setBatchSizeMB(int)}
-     */
-    public HDFSEventQueueAttributesMutator setBatchSizeMB(int size);
-    
-    /**
-     * {@link HDFSEventQueueAttributes#getBatchSizeMB()}
-     * 
-     * @return value to be used when mutator is executed on hdfsStore. -1 if not
-     *         set
-     */
-    public int getBatchSizeMB();
-    
-    /**
-     * {@link HDFSEventQueueAttributesFactory#setBatchTimeInterval(int)}
-     */
-    public HDFSEventQueueAttributesMutator setBatchTimeInterval(int interval);
-    
-    /**
-     * {@link HDFSEventQueueAttributes#getBatchTimeInterval()}
-     * 
-     * @return value to be used when mutator is executed on hdfsStore. -1 if not
-     *         set
-     */
-    public int getBatchTimeInterval();
-  }
-  
-  public static interface HDFSCompactionConfigMutator {
-    /**
-     * {@link HDFSCompactionConfigFactory#setMaxInputFileSizeMB(int)}
-     */
-    public HDFSCompactionConfigMutator setMaxInputFileSizeMB(int size);
-    
-    /**
-     * {@link HDFSCompactionConfig#getMaxInputFileSizeMB()}
-     * 
-     * @return value to be used when mutator is executed on hdfsStore. -1 if
-     *         not set
-     */
-    public int getMaxInputFileSizeMB();
-
-    /**
-     * {@link HDFSCompactionConfigFactory#setMinInputFileCount(int)}
-     */
-    public HDFSCompactionConfigMutator setMinInputFileCount(int count);
-    
-    /**
-     * {@link HDFSCompactionConfig#getMinInputFileCount()}
-     * 
-     * @return value to be used when mutator is executed on hdfsStore. -1 if
-     *         not set
-     */
-    public int getMinInputFileCount();
-
-    /**
-     * {@link HDFSCompactionConfigFactory#setMaxInputFileCount(int)}
-     */
-    public HDFSCompactionConfigMutator setMaxInputFileCount(int count);
-    
-    /**
-     * {@link HDFSCompactionConfig#getMaxInputFileCount()}
-     * 
-     * @return value to be used when mutator is executed on hdfsStore. -1 if
-     *         not set
-     */
-    public int getMaxInputFileCount();
-
-    /**
-     * {@link HDFSCompactionConfigFactory#setMaxThreads(int)}
-     */
-    public HDFSCompactionConfigMutator setMaxThreads(int count);
-    
-    /**
-     * {@link HDFSCompactionConfig#getMaxThreads()}
-     * 
-     * @return value to be used when mutator is executed on hdfsStore. -1 if
-     *         not set
-     */
-    public int getMaxThreads();
-
-    /**
-     * {@link HDFSCompactionConfigFactory#setAutoMajorCompaction(boolean)}
-     */
-    public HDFSCompactionConfigMutator setAutoMajorCompaction(boolean auto);
-    
-    /**
-     * {@link HDFSCompactionConfig#getAutoMajorCompaction()}
-     * 
-     * @return value to be used when mutator is executed on hdfsStore. null if
-     *         not set
-     */
-    public Boolean getAutoMajorCompaction();
-
-    /**
-     * {@link HDFSCompactionConfigFactory#setMajorCompactionIntervalMins(int)}
-     */
-    public HDFSCompactionConfigMutator setMajorCompactionIntervalMins(int interval);
-    
-    /**
-     * {@link HDFSCompactionConfig#getMajorCompactionIntervalMins()}
-     * 
-     * @return value to be used when mutator is executed on hdfsStore. -1 if
-     *         not set
-     */
-    public int getMajorCompactionIntervalMins();
-
-    /**
-     * {@link HDFSCompactionConfigFactory#setMajorCompactionMaxThreads(int)}
-     */
-    public HDFSCompactionConfigMutator setMajorCompactionMaxThreads(int count);
-    
-    /**
-     * {@link HDFSCompactionConfig#getMajorCompactionMaxThreads()}
-     * 
-     * @return value to be used when mutator is executed on hdfsStore. -1 if
-     *         not set
-     */
-    public int getMajorCompactionMaxThreads();
-    
-    /**
-     * {@link HDFSCompactionConfigFactory#setOldFilesCleanupIntervalMins(int)}
-     */
-    public HDFSCompactionConfigMutator setOldFilesCleanupIntervalMins(int interval);
-    
-    /**
-     * {@link HDFSCompactionConfig#getOldFilesCleanupIntervalMins()}
-     * 
-     * @return value to be used when mutator is executed on hdfsStore. -1 if
-     *         not set
-     */
-    public int getOldFilesCleanupIntervalMins();
-  }
+  public HDFSStoreMutator setMinorCompaction(boolean auto);
+
+  /**
+   * {@link HDFSStoreFactory#setMinorCompactionThreads(int)}
+   */
+  public HDFSStoreMutator setMinorCompactionThreads(int count);
+
+  /**
+   * {@link HDFSStore#getMinorCompactionThreads()}
+   * 
+   * @return value to be used when mutator is executed on hdfsStore. -1 if not
+   *         set
+   */
+  public int getMinorCompactionThreads();
+
+  /**
+   * {@link HDFSStoreFactory#setMajorCompaction(boolean)}
+   */
+  public HDFSStoreMutator setMajorCompaction(boolean auto);
+
+  /**
+   * {@link HDFSStore#getMajorCompaction()}
+   * 
+   * @return value to be used when mutator is executed on hdfsStore. null if not
+   *         set
+   */
+  public Boolean getMajorCompaction();
+
+  /**
+   * {@link HDFSStoreFactory#setMajorCompactionInterval(int)}
+   */
+  public HDFSStoreMutator setMajorCompactionInterval(int interval);
+
+  /**
+   * {@link HDFSStore#getMajorCompactionInterval()}
+   * 
+   * @return value to be used when mutator is executed on hdfsStore. -1 if not
+   *         set
+   */
+  public int getMajorCompactionInterval();
+
+  /**
+   * {@link HDFSStoreFactory#setMajorCompactionThreads(int)}
+   */
+  public HDFSStoreMutator setMajorCompactionThreads(int count);
+
+  /**
+   * {@link HDFSStore#getMajorCompactionThreads()}
+   * 
+   * @return value to be used when mutator is executed on hdfsStore. -1 if not
+   *         set
+   */
+  public int getMajorCompactionThreads();
+
+  /**
+   * {@link HDFSStoreFactory#setMaxInputFileSizeMB(int)}
+   */
+  public HDFSStoreMutator setMaxInputFileSizeMB(int size);
+
+  /**
+   * {@link HDFSStore#getMaxInputFileSizeMB()}
+   * 
+   * @return value to be used when mutator is executed on hdfsStore. -1 if not
+   *         set
+   */
+  public int getMaxInputFileSizeMB();
+
+  /**
+   * {@link HDFSStoreFactory#setMinInputFileCount(int)}
+   */
+  public HDFSStoreMutator setMinInputFileCount(int count);
+
+  /**
+   * {@link HDFSStore#getMinInputFileCount()}
+   * 
+   * @return value to be used when mutator is executed on hdfsStore. -1 if not
+   *         set
+   */
+  public int getMinInputFileCount();
+
+  /**
+   * {@link HDFSStoreFactory#setMaxInputFileCount(int)}
+   */
+  public HDFSStoreMutator setMaxInputFileCount(int count);
+
+  /**
+   * {@link HDFSStore#getMaxInputFileCount()}
+   * 
+   * @return value to be used when mutator is executed on hdfsStore. -1 if not
+   *         set
+   */
+  public int getMaxInputFileCount();
+
+  /**
+   * {@link HDFSStoreFactory#setPurgeInterval(int)}
+   */
+  public HDFSStoreMutator setPurgeInterval(int interval);
+
+  /**
+   * {@link HDFSStore#getPurgeInterval()}
+   * 
+   * @return value to be used when mutator is executed on hdfsStore. -1 if not
+   *         set
+   */
+  public int getPurgeInterval();
+
+  /**
+   * {@link HDFSStore#getBatchSize()}
+   * 
+   * @return value to be used when mutator is executed on hdfsStore. -1 if not
+   *         set
+   */
+  public int getBatchSize();
+
+  /**
+   * {@link HDFSStoreFactory#setBatchSize(int)}
+   */
+  public HDFSStoreMutator setBatchSize(int size);
+
+  /**
+   * {@link HDFSStore#getBatchInterval()}
+   * 
+   * @return value to be used when mutator is executed on hdfsStore. -1 if not
+   *         set
+   */
+  public int getBatchInterval();
+
+  /**
+   * {@link HDFSStoreFactory#setBatchInterval(int)}
+   */
+  public HDFSStoreMutator setBatchInterval(int interval);
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/3772869d/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSEventQueueAttributesImpl.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSEventQueueAttributesImpl.java b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSEventQueueAttributesImpl.java
deleted file mode 100644
index df89841..0000000
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSEventQueueAttributesImpl.java
+++ /dev/null
@@ -1,179 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2002-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *========================================================================
- */
-
-package com.gemstone.gemfire.cache.hdfs.internal;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
-import com.gemstone.gemfire.DataSerializable;
-import com.gemstone.gemfire.DataSerializer;
-import com.gemstone.gemfire.cache.hdfs.HDFSEventQueueAttributes;
-import com.gemstone.gemfire.internal.lang.ObjectUtils;
-
-/**
- * Implementation of HDFSEventQueueAttributes
- * HDFSEventQueueAttributes represents the attributes of the buffer where events are 
- * accumulated before they are persisted to HDFS  
- * 
- * @author Hemant Bhanawat
- */
-public class HDFSEventQueueAttributesImpl implements HDFSEventQueueAttributes, DataSerializable, Cloneable {
-
-  private static final long serialVersionUID = 5052784372168230680L;
-  private int maximumQueueMemory;
-  private int batchSize;
-  private boolean isPersistenceEnabled;
-  public String diskStoreName;
-  private int batchIntervalMillis;
-  private boolean diskSynchronous;
-  private int dispatcherThreads;
-  
-  public HDFSEventQueueAttributesImpl(String diskStoreName,
-      int maximumQueueMemory, int batchSize, boolean isPersistenceEnabled,
-      int batchIntervalMillis,  boolean diskSynchronous, int dispatcherThreads) {
-    this.diskStoreName = diskStoreName;
-    this.maximumQueueMemory = maximumQueueMemory;
-    this.batchSize = batchSize;
-    this.isPersistenceEnabled = isPersistenceEnabled;
-    this.batchIntervalMillis = batchIntervalMillis;
-    this.diskSynchronous = diskSynchronous;
-    this.dispatcherThreads = dispatcherThreads;
-  }
-
-  @Override
-  public String getDiskStoreName() {
-    return this.diskStoreName;
-  }
-
-  @Override
-  public int getMaximumQueueMemory() {
-    return this.maximumQueueMemory;
-  }
-
-  @Override
-  public int getBatchSizeMB() {
-    return this.batchSize;
-  }
-
-  @Override
-  public boolean isPersistent() {
-    return this.isPersistenceEnabled;
-  }
-
-  @Override
-  public int getBatchTimeInterval() {
-    return this.batchIntervalMillis;
-  }
-
-  @Override
-  public boolean isDiskSynchronous() {
-    return this.diskSynchronous;
-  }
-
-  @Override
-  public String toString()
-  {
-    StringBuffer s = new StringBuffer();
-    return s.append("HDFSEventQueueAttributes@")
-      .append(System.identityHashCode(this))
-      .append("[maximumQueueMemory=").append(this.maximumQueueMemory)
-      .append(";batchSize=").append(this.batchSize)
-      .append(";isPersistenceEnabled=").append(this.isPersistenceEnabled)
-      .append(";diskStoreName=").append(this.diskStoreName)
-      .append(";batchIntervalMillis=").append(this.batchIntervalMillis)
-      .append(";diskSynchronous=").append(this.diskSynchronous)
-      .append(";dispatcherThreads=").append(this.dispatcherThreads)
-      .append("]") .toString();
-  }
-
-  @Override
-  public void toData(DataOutput out) throws IOException {
-    out.writeInt(this.maximumQueueMemory);
-    out.writeInt(this.batchSize);
-    out.writeBoolean(this.isPersistenceEnabled);
-    DataSerializer.writeString(this.diskStoreName, out);
-    out.writeInt(this.batchIntervalMillis);
-    out.writeBoolean(this.diskSynchronous);
-    out.writeInt(this.dispatcherThreads);
-  }
-
-  @Override
-  public void fromData(DataInput in) throws IOException, ClassNotFoundException {
-    this.maximumQueueMemory = in.readInt();
-    this.batchSize = in.readInt();
-    this.isPersistenceEnabled = in.readBoolean();
-    this.diskStoreName = DataSerializer.readString(in);
-    this.batchIntervalMillis = in.readInt();
-    this.diskSynchronous = in.readBoolean();
-    this.dispatcherThreads = in.readInt();
-  }
-  
-  @Override
-  public boolean equals(final Object obj) {
-    if (this == obj) { 
-      return true;
-    }
-    
-    if (! (obj instanceof HDFSEventQueueAttributes)) {
-      return false;
-    }
-      
-    HDFSEventQueueAttributes other = (HDFSEventQueueAttributes) obj;
-      
-      if (this.maximumQueueMemory != other.getMaximumQueueMemory()
-          || this.batchSize != other.getBatchSizeMB()
-          || this.isPersistenceEnabled != other.isPersistent()
-          || this.batchIntervalMillis != other.getBatchTimeInterval()
-          || this.diskSynchronous != other.isDiskSynchronous()
-          || this.dispatcherThreads != other.getDispatcherThreads()
-              || ObjectUtils.equals(getDiskStoreName(), other.getDiskStoreName())
-        ) {
-        return false;
-        
-    }
-  
-    return true;
-  }
-  
-  @Override
-  public Object clone() {
-    HDFSEventQueueAttributesImpl other = null;
-    try {
-      other =
-          (HDFSEventQueueAttributesImpl) super.clone();
-    } catch (CloneNotSupportedException e) {
-    } 
-    other.maximumQueueMemory = this.maximumQueueMemory;
-    other.batchSize = this.batchSize;
-    other.isPersistenceEnabled = this.isPersistenceEnabled;
-    other.diskStoreName = this.diskStoreName;
-    other.batchIntervalMillis = this.batchIntervalMillis;
-    other.diskSynchronous = this.diskSynchronous;
-    other.dispatcherThreads = this.dispatcherThreads;
-    return other;
-  }
-  
-  @Override
-  public int hashCode() {
-	assert false : "hashCode not designed";
-	return -1;
-  }
-  
-  public HDFSEventQueueAttributesImpl copy() {
-    return (HDFSEventQueueAttributesImpl) clone();
-  }
-
-	@Override
-  public int getDispatcherThreads() {
-	  return this.dispatcherThreads;
-  }
-
-  
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/3772869d/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSIntegrationUtil.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSIntegrationUtil.java b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSIntegrationUtil.java
index 242923b..968b9e1 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSIntegrationUtil.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSIntegrationUtil.java
@@ -8,15 +8,12 @@
 
 package com.gemstone.gemfire.cache.hdfs.internal;
 
-
 import com.gemstone.gemfire.cache.Cache;
 import com.gemstone.gemfire.cache.Region;
 import com.gemstone.gemfire.cache.RegionAttributes;
 import com.gemstone.gemfire.cache.asyncqueue.AsyncEventQueue;
 import com.gemstone.gemfire.cache.asyncqueue.AsyncEventQueueFactory;
 import com.gemstone.gemfire.cache.asyncqueue.internal.AsyncEventQueueFactoryImpl;
-import com.gemstone.gemfire.cache.hdfs.HDFSEventQueueAttributes;
-import com.gemstone.gemfire.cache.hdfs.HDFSEventQueueAttributesFactory;
 import com.gemstone.gemfire.cache.hdfs.HDFSStore;
 import com.gemstone.gemfire.i18n.LogWriterI18n;
 import com.gemstone.gemfire.internal.Assert;
@@ -30,89 +27,77 @@ import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
  *
  */
 public class HDFSIntegrationUtil {
-  
-  public static <K, V> AsyncEventQueue createDefaultAsyncQueueForHDFS(Cache cache, 
-     boolean writeOnly, String regionPath)
-  {
-    // Create default event attributes 
-    HDFSEventQueueAttributesFactory  hdfsqueueFactory = new HDFSEventQueueAttributesFactory();
-    return createAsyncQueueForHDFS(cache,
-        regionPath, writeOnly, hdfsqueueFactory.create());
+
+  public static <K, V> AsyncEventQueue createDefaultAsyncQueueForHDFS(Cache cache, boolean writeOnly, String regionPath) {
+    return createAsyncQueueForHDFS(cache, regionPath, writeOnly, null);
   }
-  
-  public static AsyncEventQueue createAsyncQueueForHDFS(Cache cache,
-      String regionPath, boolean writeOnly, HDFSEventQueueAttributes eventAttribs)
-   {
-     LogWriterI18n logger = cache.getLoggerI18n();
-     String defaultAsyncQueueName = HDFSStoreFactoryImpl.getEventQueueName(regionPath);
-
-     AsyncEventQueueFactory factory = cache.createAsyncEventQueueFactory();
-     factory.setBatchSize(eventAttribs.getBatchSizeMB());
-     factory.setPersistent(eventAttribs.isPersistent());
-     factory.setDiskStoreName(eventAttribs.getDiskStoreName());
-     factory.setMaximumQueueMemory(eventAttribs.getMaximumQueueMemory());
-     factory.setBatchTimeInterval(eventAttribs.getBatchTimeInterval());
-     factory.setDiskSynchronous(eventAttribs.isDiskSynchronous());
-     factory.setDiskSynchronous(eventAttribs.isDiskSynchronous());
-     factory.setDispatcherThreads(eventAttribs.getDispatcherThreads());
-     factory.setParallel(true);
-     factory.addGatewayEventFilter(new HDFSEventQueueFilter(logger));
-     ((AsyncEventQueueFactoryImpl)factory).setBucketSorted(!writeOnly);
-     ((AsyncEventQueueFactoryImpl)factory).setIsHDFSQueue(true);
-     
-     AsyncEventQueue asyncQ = null;
-     
-     if (!writeOnly)
-       asyncQ = factory.create(defaultAsyncQueueName, new HDFSEventListener(cache.getLoggerI18n()));
-     else
-       asyncQ = factory.create(defaultAsyncQueueName, new HDFSWriteOnlyStoreEventListener(cache.getLoggerI18n()));
-     
-     logger.fine("HDFS: async queue created for HDFS. Id: " + asyncQ.getId() + ". Disk store: " + asyncQ.getDiskStoreName() + 
-         ". Batch size: " + asyncQ.getBatchSize() + ". bucket sorted:  " + !writeOnly) ;
-     return asyncQ;
-     
-   }
-  
-  public static  void createAndAddAsyncQueue(String regionPath,
-      RegionAttributes regionAttributes, Cache cache) {
-    if(!regionAttributes.getDataPolicy().withHDFS()) {
-      return;
+
+  private static AsyncEventQueue createAsyncQueueForHDFS(Cache cache, String regionPath, boolean writeOnly,
+      HDFSStore configView) {
+    LogWriterI18n logger = cache.getLoggerI18n();
+    String defaultAsyncQueueName = HDFSStoreFactoryImpl.getEventQueueName(regionPath);
+
+    if (configView == null) {
+      configView = new HDFSStoreFactoryImpl(cache).getConfigView();
     }
     
+
+    AsyncEventQueueFactory factory = cache.createAsyncEventQueueFactory();
+    factory.setBatchSize(configView.getBatchSize());
+    factory.setPersistent(configView.getBufferPersistent());
+    factory.setDiskStoreName(configView.getDiskStoreName());
+    factory.setMaximumQueueMemory(configView.getMaxMemory());
+    factory.setBatchTimeInterval(configView.getBatchInterval());
+    factory.setDiskSynchronous(configView.getSynchronousDiskWrite());
+    factory.setDispatcherThreads(configView.getDispatcherThreads());
+    factory.setParallel(true);
+    factory.addGatewayEventFilter(new HDFSEventQueueFilter(logger));
+    ((AsyncEventQueueFactoryImpl) factory).setBucketSorted(!writeOnly);
+    ((AsyncEventQueueFactoryImpl) factory).setIsHDFSQueue(true);
+
+    AsyncEventQueue asyncQ = null;
+
+    if (!writeOnly)
+      asyncQ = factory.create(defaultAsyncQueueName, new HDFSEventListener(cache.getLoggerI18n()));
+    else
+      asyncQ = factory.create(defaultAsyncQueueName, new HDFSWriteOnlyStoreEventListener(cache.getLoggerI18n()));
+
+    logger.fine("HDFS: async queue created for HDFS. Id: " + asyncQ.getId() + ". Disk store: "
+        + asyncQ.getDiskStoreName() + ". Batch size: " + asyncQ.getBatchSize() + ". bucket sorted:  " + !writeOnly);
+    return asyncQ;
+
+  }
+
+  public static void createAndAddAsyncQueue(String regionPath, RegionAttributes regionAttributes, Cache cache) {
+    if (!regionAttributes.getDataPolicy().withHDFS()) {
+      return;
+    }
+
     String leaderRegionPath = getLeaderRegionPath(regionPath, regionAttributes, cache);
-    
+
     String defaultAsyncQueueName = HDFSStoreFactoryImpl.getEventQueueName(leaderRegionPath);
     if (cache.getAsyncEventQueue(defaultAsyncQueueName) == null) {
-      if (regionAttributes.getHDFSStoreName() != null && regionAttributes.getPartitionAttributes() != null 
+      if (regionAttributes.getHDFSStoreName() != null && regionAttributes.getPartitionAttributes() != null
           && !(regionAttributes.getPartitionAttributes().getLocalMaxMemory() == 0)) {
-        HDFSStore store = ((GemFireCacheImpl)cache).findHDFSStore(regionAttributes.getHDFSStoreName());
+        HDFSStore store = ((GemFireCacheImpl) cache).findHDFSStore(regionAttributes.getHDFSStoreName());
         if (store == null) {
           throw new IllegalStateException(
-              LocalizedStrings.HOPLOG_HDFS_STORE_NOT_FOUND
-                  .toLocalizedString(regionAttributes.getHDFSStoreName()));
-        }
-        HDFSEventQueueAttributes queueAttrs = store.getHDFSEventQueueAttributes();
-        if(queueAttrs == null) {
-          // no async queue is specified for region with a HDFS store. Create a async queue with default 
-          // properties and set the bucketsorted=true.
-          HDFSIntegrationUtil.createDefaultAsyncQueueForHDFS(cache, regionAttributes.getHDFSWriteOnly(), leaderRegionPath);
-        }
-        else {
-          HDFSIntegrationUtil.createAsyncQueueForHDFS(cache, leaderRegionPath, regionAttributes.getHDFSWriteOnly(), queueAttrs);
+              LocalizedStrings.HOPLOG_HDFS_STORE_NOT_FOUND.toLocalizedString(regionAttributes.getHDFSStoreName()));
         }
+        HDFSIntegrationUtil
+            .createAsyncQueueForHDFS(cache, leaderRegionPath, regionAttributes.getHDFSWriteOnly(), store);
       }
     }
   }
 
-  private static String getLeaderRegionPath(String regionPath,
-      RegionAttributes regionAttributes, Cache cache) {
+  private static String getLeaderRegionPath(String regionPath, RegionAttributes regionAttributes, Cache cache) {
     String colocated;
-    while(regionAttributes.getPartitionAttributes() != null 
+    while (regionAttributes.getPartitionAttributes() != null
         && (colocated = regionAttributes.getPartitionAttributes().getColocatedWith()) != null) {
       // Do not waitOnInitialization() for PR
-      GemFireCacheImpl gfc = (GemFireCacheImpl)cache;
+      GemFireCacheImpl gfc = (GemFireCacheImpl) cache;
       Region colocatedRegion = gfc.getPartitionedRegion(colocated, false);
-      if(colocatedRegion == null) {
+      if (colocatedRegion == null) {
         Assert.fail("Could not find parent region " + colocated + " for " + regionPath);
       }
       regionAttributes = colocatedRegion.getAttributes();


Mime
View raw message