GEODE-10: Absorb review comments

Reviewers posted useful comments on javadoc and method names. Link to the
reviewboard: https://reviews.apache.org/r/36397


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/5a90bf11
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/5a90bf11
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/5a90bf11

Branch: refs/heads/feature/GEODE-77
Commit: 5a90bf11756cd198ec2b46c74fab0958632758a5
Parents: 3772869
Author: Ashvin Agrawal <ash...@apache.org>
Authored: Wed Jul 15 15:37:27 2015 -0700
Committer: Ashvin Agrawal <ash...@apache.org>
Committed: Wed Jul 15 15:41:42 2015 -0700

----------------------------------------------------------------------
 .../gemstone/gemfire/cache/hdfs/HDFSStore.java  | 171 +++++++++++--------
 .../gemfire/cache/hdfs/HDFSStoreFactory.java    |  56 +++++-
 .../gemfire/cache/hdfs/HDFSStoreMutator.java    |  40 +++--
 .../hdfs/internal/HDFSStoreConfigHolder.java    |  54 +++---
 .../cache/hdfs/internal/HDFSStoreCreation.java  |  16 +-
 .../cache/hdfs/internal/HDFSStoreImpl.java      |  16 +-
 .../hdfs/internal/HDFSStoreMutatorImpl.java     |  32 ++--
 .../hoplog/HDFSUnsortedHoplogOrganizer.java     |   4 +-
 .../hoplog/HdfsSortedOplogOrganizer.java        |   6 +-
 .../internal/cache/xmlcache/CacheXmlParser.java |   2 +-
 .../cli/commands/HDFSStoreCommands.java         |   4 +-
 .../cli/functions/AlterHDFSStoreFunction.java   |   2 +-
 .../cli/functions/CreateHDFSStoreFunction.java  |   2 +-
 .../hdfs/internal/HDFSConfigJUnitTest.java      |  30 ++--
 .../internal/HdfsStoreMutatorJUnitTest.java     |  52 +++---
 .../hoplog/TieredCompactionJUnitTest.java       |  18 +-
 .../hoplog/mapreduce/HoplogUtilJUnitTest.java   |   2 +-
 .../commands/HDFSStoreCommandsJUnitTest.java    |   8 +-
 .../DescribeHDFSStoreFunctionJUnitTest.java     |  10 +-
 19 files changed, 304 insertions(+), 221 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/5a90bf11/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSStore.java
----------------------------------------------------------------------
diff --git 
a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSStore.java 
b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSStore.java
index c9b399d..0458721 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSStore.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSStore.java
@@ -16,11 +16,11 @@ import com.gemstone.gemfire.cache.wan.GatewaySender;
  * store will share the same HDFS persistence attributes. A user will normally
  * perform the following steps to enable HDFS persistence for a region:
  * <ol>
- * <li>[Optional] Creates a Disk store for reliability
- * <li>HDFS buffers will use local persistence till it is persisted on HDFS
- * <li>Creates a HDFS Store
- * <li>Creates a Region connected to HDFS Store Uses region API to create and
- * query data
+ * <li>[Optional] Creates a DiskStore for HDFS buffer reliability (HDFS buffers
+ * will be persisted locally till data lands on HDFS)
+ * <li>Creates a HDFS Store (connects to DiskStore created earlier)
+ * <li>Creates a Region connected to HDFS Store
+ * <li>Uses region API to create and query data
  * </ol>
  * <p>
  * Instances of this interface are created using {@link 
HDFSStoreFactory#create}
@@ -32,9 +32,9 @@ import com.gemstone.gemfire.cache.wan.GatewaySender;
 public interface HDFSStore {
   public static final String DEFAULT_HOME_DIR = "gemfire";
   public static final float DEFAULT_BLOCK_CACHE_SIZE = 10f;
-  public static final int DEFAULT_MAX_WRITE_ONLY_FILE_SIZE = 256; 
+  public static final int DEFAULT_WRITE_ONLY_FILE_SIZE_LIMIT = 256;
   public static final int DEFAULT_WRITE_ONLY_FILE_ROLLOVER_INTERVAL = 3600;
-  
+
   public static final int DEFAULT_BATCH_SIZE_MB = 32;
   public static final int DEFAULT_BATCH_INTERVAL_MILLIS = 60000;
   public static final boolean DEFAULT_WRITEONLY_HDFSSTORE = false;
@@ -42,15 +42,15 @@ public interface HDFSStore {
   public static final boolean DEFAULT_DISK_SYNCHRONOUS = 
GatewaySender.DEFAULT_DISK_SYNCHRONOUS;
   public static final int DEFAULT_MAX_BUFFER_MEMORY = 
GatewaySender.DEFAULT_MAXIMUM_QUEUE_MEMORY;
   public static final int DEFAULT_DISPATCHER_THREADS = 
GatewaySender.DEFAULT_HDFS_DISPATCHER_THREADS;
-  
+
   public static final boolean DEFAULT_MINOR_COMPACTION = true;
   public static final int DEFAULT_MINOR_COMPACTION_THREADS = 10;
   public static final boolean DEFAULT_MAJOR_COMPACTION = true;
   public static final int DEFAULT_MAJOR_COMPACTION_THREADS = 2;
-  public static final int DEFAULT_MAX_INPUT_FILE_SIZE_MB = 512;
-  public static final int DEFAULT_MAX_INPUT_FILE_COUNT = 10;
-  public static final int DEFAULT_MIN_INPUT_FILE_COUNT = 4;
-  
+  public static final int DEFAULT_INPUT_FILE_SIZE_MAX_MB = 512;
+  public static final int DEFAULT_INPUT_FILE_COUNT_MAX = 10;
+  public static final int DEFAULT_INPUT_FILE_COUNT_MIN = 4;
+
   public static final int DEFAULT_MAJOR_COMPACTION_INTERVAL_MINS = 720;
   public static final int DEFAULT_OLD_FILE_CLEANUP_INTERVAL_MINS = 30;
 
@@ -64,7 +64,16 @@ public interface HDFSStore {
    * URL or NameNode Service URL. NameNode URL can also be provided via
    * hdfs-site.xml (see HDFSClientConfigFile). If the NameNode url is missing
    * HDFSStore creation will fail. HDFS client can also load hdfs configuration
-   * files in the classpath. NameNode URL provided in this way is also fine.
+   * files in the classpath. The following precedence order is applied
+   * <ol>
+   * <li>URL explicitly configured in the HdfsStore
+   * <li>URL provided in client configuration file:
+   * {@link #getHDFSClientConfigFile()}
+   * <li>URL provided in default configuration files loaded by hdfs-client
+   * </ol>
+   * 
+   * HDFSStore will use the selected URL only. It will fail if the selected URL
+   * is not reachable.
    * 
    * @return Namenode url explicitly configured by user
    */
@@ -108,7 +117,7 @@ public interface HDFSStore {
 
   /**
    * @return the percentage of the heap to use for the block cache in the range
-   * 0 ... 100
+   *         0 ... 100
    */
   public float getBlockCacheSize();
 
@@ -116,24 +125,24 @@ public interface HDFSStore {
    * HDFSStore buffer data is persisted on HDFS in batches. The BatchSize
    * defines the maximum size (in megabytes) of each batch that is written to
    * HDFS. This parameter, along with BatchInterval determines the rate at 
which
-   * data is persisted on HDFS. A higher value means that less number of bigger
-   * batches are persisted to HDFS and hence big files are created on HDFS. 
But,
-   * bigger batches consume memory.
+   * data is persisted on HDFS. A higher value causes fewer and bigger batches
+   * to be persisted to HDFS and hence big files are created on HDFS. But,
+   * bigger batches consume more memory.
    * 
-   * @return batchsize in MB
+   * @return batch size in MB
    */
   public int getBatchSize();
-  
+
   /**
    * HDFSStore buffer data is persisted on HDFS in batches, and the
-   * BatchInterval defines the maximum time that can elapse between writing
-   * batches to HDFS. This parameter, along with BatchSize determines the rate
-   * at which data is persisted on HDFS.
+   * BatchInterval defines the number of milliseconds that can elapse between
+   * writing batches to HDFS. This parameter, along with BatchSize determines
+   * the rate at which data is persisted on HDFS.
    * 
-   * @return interval in seconds
+   * @return batch interval in milliseconds
    */
   public int getBatchInterval();
-  
+
   /**
    * The maximum number of threads (per region) used to write batches to HDFS.
    * If you have a large number of clients that add or update data in a region,
@@ -143,12 +152,12 @@ public interface HDFSStore {
    * @return The maximum number of threads
    */
   public int getDispatcherThreads();
-  
+
   /**
    * Configure if HDFSStore in-memory buffer data, that has not been persisted
-   * on HDFS yet, should be persisted to a local disk to buffer prevent data
-   * loss. Persisting data may impact write performance. If performance is
-   * critical and buffer data loss is acceptable, disable persistence.
+   * on HDFS yet, should be persisted to a local disk to prevent buffer data
+   * loss. Persisting buffer data may impact write performance. If performance
+   * is critical and buffer data loss is acceptable, disable persistence.
    * 
    * @return true if buffer is persisted locally
    */
@@ -165,38 +174,44 @@ public interface HDFSStore {
   public String getDiskStoreName();
 
   /**
-   * Synchronous flag indicates if synchronous disk writes are enabled or not.
+   * HDFS buffers can be persisted on local disk. Each region update record is
+   * written to the disk synchronously if synchronous disk write is enabled.
+   * Enable this option if the data being persisted is critical and no record
+   * should be lost in case of a crash. This high reliability mode may increase
+   * write latency. If synchronous mode is disabled, data is persisted in
+   * batches which usually results in better performance.
    * 
    * @return true if enabled
    */
   public boolean getSynchronousDiskWrite();
-  
+
   /**
    * For HDFS write-only regions, this defines the maximum size (in megabytes)
    * that an HDFS log file can reach before HDFSStore closes the file and 
begins
-   * writing to a new file. This clause is ignored for HDFS read/write regions.
+   * writing to a new file. This option is ignored for HDFS read/write regions.
    * Keep in mind that the files are not available for MapReduce processing
    * until the file is closed; you can also set WriteOnlyFileRolloverInterval 
to
    * specify the maximum amount of time an HDFS log file remains open.
    * 
    * @return max file size in MB.
    */
-  public int getMaxWriteOnlyFileSize();
-  
+  public int getWriteOnlyFileSizeLimit();
+
   /**
-   * For HDFS write-only regions, this defines the maximum time that can elapse
-   * before HDFSStore closes an HDFS file and begins writing to a new file. 
This
-   * configuration is ignored for HDFS read/write regions.
+   * For HDFS write-only regions, this defines the number of seconds that can
+   * elapse before HDFSStore closes an HDFS file and begins writing to a new
+   * file. This configuration is ignored for HDFS read/write regions.
    * 
-   * @return interval in seconds 
+   * @return interval in seconds
    */
   public int getWriteOnlyFileRolloverInterval();
-  
+
   /**
    * Minor compaction reorganizes data in files to optimize read performance 
and
    * reduce number of files created on HDFS. Minor compaction process can be
    * I/O-intensive, tune the performance of minor compaction using
-   * MinorCompactionThreads.
+   * MinorCompactionThreads. Minor compaction is not applicable to write-only
+   * regions.
    * 
    * @return true if auto minor compaction is enabled
    */
@@ -206,6 +221,7 @@ public interface HDFSStore {
    * The maximum number of threads that HDFSStore uses to perform minor
    * compaction. You can increase the number of threads used for compaction as
    * necessary in order to fully utilize the performance of your HDFS cluster.
+   * Minor compaction is not applicable to write-only regions.
    * 
    * @return maximum number of threads executing minor compaction
    */
@@ -216,17 +232,18 @@ public interface HDFSStore {
    * HDFS files, which can save space in HDFS and improve performance when
    * reading from HDFS. As major compaction process can be long-running and
    * I/O-intensive, tune the performance of major compaction using
-   * MajorCompactionInterval and MajorCompactionThreads.
+   * MajorCompactionInterval and MajorCompactionThreads. Major compaction is 
not
+   * applicable to write-only regions.
    * 
    * @return true if auto major compaction is enabled
    */
   public boolean getMajorCompaction();
 
   /**
-   * The amount of time after which HDFSStore performs the next major 
compaction
-   * cycle.
+   * The number of minutes after which HDFSStore performs the next major
+   * compaction cycle. Major compaction is not applicable to write-only 
regions.
    * 
-   * @return interval in seconds
+   * @return interval in minutes
    */
   public int getMajorCompactionInterval();
 
@@ -234,34 +251,39 @@ public interface HDFSStore {
    * The maximum number of threads that HDFSStore uses to perform major
    * compaction. You can increase the number of threads used for compaction as
    * necessary in order to fully utilize the performance of your HDFS cluster.
+   * Major compaction is not applicable to write-only regions.
    * 
    * @return maximum number of threads executing major compaction
    */
   public int getMajorCompactionThreads();
-  
+
   /**
-   * HDFSStore creates new files as part of periodic maintenance activity.
-   * Existing files are deleted asynchronously. PurgeInterval defines the 
amount
-   * of time old files remain available and could be externally, e.g. read by 
MR
-   * jobs. After this interval has passed, old files are deleted.
+   * HDFSStore may create new files as part of periodic maintenance activity. 
It
+   * deletes old files asynchronously. PurgeInterval defines the number of
+   * minutes for which old files will remain available to be consumed
+   * externally, e.g. read by MR jobs. After this interval, old files are
+   * deleted. This configuration is not applicable to write-only regions
    * 
-   * @return interval configuration that guides deletion of old files
+   * @return old file purge interval in minutes
    */
   public int getPurgeInterval();
-  
+
   /**
-   * Permanently deletes all HDFS files associated with this this
-   * {@link HDFSStore}. This operation will fail ( {@link 
IllegalStateException}
-   * ) if any region is still using this store for persistence.
+   * Permanently deletes all HDFS files associated with this {@link HDFSStore}.
+   * This operation will fail if any region is still using this store for
+   * persistence.
+   * 
+   * @exception IllegalStateException
+   *              if any region using this hdfsStore still exists
    */
   public void destroy();
-  
+
   /**
    * @return new instance of mutator object that can be used to alter 
properties
    *         of this store
    */
   public HDFSStoreMutator createHdfsStoreMutator();
-  
+
   /**
    * Identifies attributes configured in {@link HDFSStoreMutator} and applies
    * the new attribute values to this instance of {@link HDFSStore} 
dynamically.
@@ -275,23 +297,38 @@ public interface HDFSStore {
   public HDFSStore alter(HDFSStoreMutator mutator);
 
   /**
-   * This advanced configuration affects minor compaction.
-   * @return size threshold (in MB). A file larger than this size will not be
-   *         considered for compaction
+   * A file larger than this size, in megabytes, will not be compacted by minor
+   * compactor. Increasing this value will result in compaction of bigger 
files.
+   * This will lower the number of files on HDFS at the cost of increased IO.
+   * This option is for advanced users and will need tuning in special cases
+   * only. This option is not applicable to write-only regions.
+   * 
+   * @return size threshold (in MB)
    */
-  public int getMaxInputFileSizeMB();
+  public int getInputFileSizeMax();
 
   /**
-   * This advanced configuration affects minor compaction.
-   * @return minimum count threshold. Compaction cycle will commence if the
-   *         number of files to be compacted is more than this number
+   * A minimum number of files must exist in a bucket directory on HDFS before
+   * minor compaction will start compaction. Keeping a higher value for this
+   * option will reduce the frequency of minor compaction, which in turn may
+   * result in reduced IO overhead. However it may result in increased pressure
+   * on HDFS NameNode. This option is for advanced users and will need tuning 
in
+   * special cases only. This option is not applicable to write-only regions.
+   * 
+   * @return minimum number of files for minor compaction to get triggered
    */
-  public int getMinInputFileCount();
+  public int getInputFileCountMin();
 
   /**
-   * This advanced configuration affects minor compaction.
-   * @return maximum count threshold.  Compaction cycle will not include more
-   *          files than the maximum
+   * The maximum number of files compacted by Minor compactor in a cycle.
+   * Keeping a higher value for this option will reduce the frequency of minor
+   * compaction, which in turn may result in reduced IO overhead. However it 
may
+   * result in large number of concurrent IO operations which in-turn may
+   * degrade the performance. This option is for advanced users and will need
+   * tuning in special cases only. This option is not applicable to write-only
+   * regions.
+   * 
+   * @return maximum number of files minor compacted in one cycle
    */
-  public int getMaxInputFileCount();
+  public int getInputFileCountMax();
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/5a90bf11/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSStoreFactory.java
----------------------------------------------------------------------
diff --git 
a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSStoreFactory.java
 
b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSStoreFactory.java
index 949ff40..2684de5 100644
--- 
a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSStoreFactory.java
+++ 
b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSStoreFactory.java
@@ -48,61 +48,91 @@ public interface HDFSStoreFactory {
 
   /**
    * @see HDFSStore#getHDFSClientConfigFile()
+   * @exception IllegalArgumentException
+   *              if the {@code value} is less than 0 or more than 100
    */
-  public HDFSStoreFactory setBlockCacheSize(float percentage);
+  public HDFSStoreFactory setBlockCacheSize(float value);
 
   /**
-   * @see HDFSStore#getMaxWriteOnlyFileSize()
+   * Default value {@link HDFSStore#DEFAULT_WRITE_ONLY_FILE_SIZE_LIMIT}
+   * @see HDFSStore#getWriteOnlyFileSizeLimit()
+   * @exception IllegalArgumentException
+   *              if the {@code value} is less than 0 
    */
-  public HDFSStoreFactory setMaxWriteOnlyFileSize(int maxFileSize);
+  public HDFSStoreFactory setWriteOnlyFileSizeLimit(int maxFileSize);
 
   /**
+   * Default value {@link HDFSStore#DEFAULT_WRITE_ONLY_FILE_ROLLOVER_INTERVAL}
    * @see HDFSStore#getWriteOnlyFileRolloverInterval()
+   * @exception IllegalArgumentException
+   *              if the {@code value} is less than 0 
    */
   public HDFSStoreFactory setWriteOnlyFileRolloverInterval(int interval);
 
   /**
+   * Default value {@link HDFSStore#DEFAULT_MINOR_COMPACTION}
    * @see HDFSStore#getMinorCompaction()
    */
   public HDFSStoreFactory setMinorCompaction(boolean auto);
 
   /**
+   * Default value {@link HDFSStore#DEFAULT_MINOR_COMPACTION_THREADS}
    * @see HDFSStore#getMinorCompactionThreads()
+   * @exception IllegalArgumentException
+   *              if the {@code value} is less than 0 
    */
   public HDFSStoreFactory setMinorCompactionThreads(int count);
 
   /**
+   * Default value {@link HDFSStore#DEFAULT_MAJOR_COMPACTION}
    * @see HDFSStore#getMajorCompaction()
    */
   public HDFSStoreFactory setMajorCompaction(boolean auto);
 
   /**
+   * Default value {@link HDFSStore#DEFAULT_MAJOR_COMPACTION_INTERVAL_MINS}
    * @see HDFSStore#getMajorCompactionInterval()
+   * @exception IllegalArgumentException
+   *              if the {@code value} is less than 0 
    */
   public HDFSStoreFactory setMajorCompactionInterval(int interval);
 
   /**
+   * Default value {@link HDFSStore#DEFAULT_MAJOR_COMPACTION_THREADS}
    * @see HDFSStore#getMajorCompactionThreads()
+   * @exception IllegalArgumentException
+   *              if the {@code value} is less than 0 
    */
   public HDFSStoreFactory setMajorCompactionThreads(int count);
 
   /**
-   * @see HDFSStore#getMaxInputFileSizeMB()
+   * Default value {@link HDFSStore#DEFAULT_INPUT_FILE_SIZE_MAX_MB}
+   * @see HDFSStore#getInputFileSizeMax()
+   * @exception IllegalArgumentException
+   *              if the {@code value} is less than 0 
    */
-  public HDFSStoreFactory setMaxInputFileSizeMB(int size);
+  public HDFSStoreFactory setInputFileSizeMax(int size);
 
   /**
-   * @see HDFSStore#getMinInputFileCount()
+   * Default value {@link HDFSStore#DEFAULT_INPUT_FILE_COUNT_MIN}
+   * @see HDFSStore#getInputFileCountMin()
+   * @exception IllegalArgumentException
+   *              if the {@code value} is less than 0 
    */
-  public HDFSStoreFactory setMinInputFileCount(int count);
+  public HDFSStoreFactory setInputFileCountMin(int count);
 
   /**
-   * @see HDFSStore#getMaxInputFileCount()
+   * Default value {@link HDFSStore#DEFAULT_INPUT_FILE_COUNT_MAX}
+   * @see HDFSStore#getInputFileCountMax()
+   * @exception IllegalArgumentException
+   *              if the {@code value} is less than 0 
    */
-  public HDFSStoreFactory setMaxInputFileCount(int count);
+  public HDFSStoreFactory setInputFileCountMax(int count);
 
   /**
    * @see HDFSStore#getPurgeInterval()
+   * @exception IllegalArgumentException
+   *              if the {@code value} is less than 0 
    */
   public HDFSStoreFactory setPurgeInterval(int interval);
 
@@ -113,16 +143,22 @@ public interface HDFSStoreFactory {
 
   /**
    * @see HDFSStore#getMaxMemory()
+   * @exception IllegalArgumentException
+   *              if the {@code value} is less than 0 
    */
   public HDFSStoreFactory setMaxMemory(int memory);
 
   /**
    * @see HDFSStore#getBatchInterval()
+   * @exception IllegalArgumentException
+   *              if the {@code value} is less than 0 
    */
   public HDFSStoreFactory setBatchInterval(int interval);
 
   /**
    * @see HDFSStore#getBatchSize()
+   * @exception IllegalArgumentException
+   *              if the {@code value} is less than 0 
    */
   public HDFSStoreFactory setBatchSize(int size);
 
@@ -138,6 +174,8 @@ public interface HDFSStoreFactory {
 
   /**
    * @see HDFSStore#getDispatcherThreads()
+   * @exception IllegalArgumentException
+   *              if the {@code value} is less than 0 
    */
   public HDFSStoreFactory setDispatcherThreads(int dispatcherThreads);
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/5a90bf11/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSStoreMutator.java
----------------------------------------------------------------------
diff --git 
a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSStoreMutator.java
 
b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSStoreMutator.java
index 7b0229c..ba2dc18 100644
--- 
a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSStoreMutator.java
+++ 
b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSStoreMutator.java
@@ -8,19 +8,27 @@
 
 package com.gemstone.gemfire.cache.hdfs;
 
+/**
+ * HDFSStoreMutator provides a means to dynamically alter {@link HDFSStore}'s
+ * behavior. Instances of this interface are created using
+ * {@link HDFSStore#createHdfsStoreMutator} and applied using
+ * {@link HDFSStore#alter}
+ * 
+ * @author ashvina
+ */
 public interface HDFSStoreMutator {
   /**
-   * {@link HDFSStoreFactory#setMaxWriteOnlyFileSize(int)}
+   * {@link HDFSStoreFactory#setWriteOnlyFileSizeLimit(int)}
    */
-  public HDFSStoreMutator setMaxWriteOnlyFileSize(int maxFileSize);
+  public HDFSStoreMutator setWriteOnlyFileSizeLimit(int maxFileSize);
 
   /**
-   * {@link HDFSStore#getMaxWriteOnlyFileSize()}
+   * {@link HDFSStore#getWriteOnlyFileSizeLimit()}
    * 
    * @return value to be used when mutator is executed on hdfsStore. -1 if not
    *         set
    */
-  public int getMaxWriteOnlyFileSize();
+  public int getWriteOnlyFileSizeLimit();
 
   /**
    * {@link HDFSStoreFactory#setWriteOnlyFileRolloverInterval(int)}
@@ -101,43 +109,43 @@ public interface HDFSStoreMutator {
   public int getMajorCompactionThreads();
 
   /**
-   * {@link HDFSStoreFactory#setMaxInputFileSizeMB(int)}
+   * {@link HDFSStoreFactory#setInputFileSizeMax(int)}
    */
-  public HDFSStoreMutator setMaxInputFileSizeMB(int size);
+  public HDFSStoreMutator setInputFileSizeMax(int size);
 
   /**
-   * {@link HDFSStore#getMaxInputFileSizeMB()}
+   * {@link HDFSStore#getInputFileSizeMax()}
    * 
    * @return value to be used when mutator is executed on hdfsStore. -1 if not
    *         set
    */
-  public int getMaxInputFileSizeMB();
+  public int getInputFileSizeMax();
 
   /**
-   * {@link HDFSStoreFactory#setMinInputFileCount(int)}
+   * {@link HDFSStoreFactory#setInputFileCountMin(int)}
    */
-  public HDFSStoreMutator setMinInputFileCount(int count);
+  public HDFSStoreMutator setInputFileCountMin(int count);
 
   /**
-   * {@link HDFSStore#getMinInputFileCount()}
+   * {@link HDFSStore#getInputFileCountMin()}
    * 
    * @return value to be used when mutator is executed on hdfsStore. -1 if not
    *         set
    */
-  public int getMinInputFileCount();
+  public int getInputFileCountMin();
 
   /**
-   * {@link HDFSStoreFactory#setMaxInputFileCount(int)}
+   * {@link HDFSStoreFactory#setInputFileCountMax(int)}
    */
-  public HDFSStoreMutator setMaxInputFileCount(int count);
+  public HDFSStoreMutator setInputFileCountMax(int count);
 
   /**
-   * {@link HDFSStore#getMaxInputFileCount()}
+   * {@link HDFSStore#getInputFileCountMax()}
    * 
    * @return value to be used when mutator is executed on hdfsStore. -1 if not
    *         set
    */
-  public int getMaxInputFileCount();
+  public int getInputFileCountMax();
 
   /**
    * {@link HDFSStoreFactory#setPurgeInterval(int)}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/5a90bf11/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSStoreConfigHolder.java
----------------------------------------------------------------------
diff --git 
a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSStoreConfigHolder.java
 
b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSStoreConfigHolder.java
index d663e3d..3fe37dc 100644
--- 
a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSStoreConfigHolder.java
+++ 
b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSStoreConfigHolder.java
@@ -37,16 +37,16 @@ public class HDFSStoreConfigHolder implements HDFSStore, 
HDFSStoreFactory ,Seria
   private String homeDir = DEFAULT_HOME_DIR;
   private String clientConfigFile = null;
   private float blockCacheSize = DEFAULT_BLOCK_CACHE_SIZE;
-  private int maxFileSize = DEFAULT_MAX_WRITE_ONLY_FILE_SIZE;
+  private int maxFileSize = DEFAULT_WRITE_ONLY_FILE_SIZE_LIMIT;
   private int fileRolloverInterval = DEFAULT_WRITE_ONLY_FILE_ROLLOVER_INTERVAL;
   protected boolean isAutoCompact = DEFAULT_MINOR_COMPACTION;
   protected boolean autoMajorCompact = DEFAULT_MAJOR_COMPACTION;
   protected int maxConcurrency = DEFAULT_MINOR_COMPACTION_THREADS;
   protected int majorCompactionConcurrency = DEFAULT_MAJOR_COMPACTION_THREADS;
   protected int majorCompactionIntervalMins = 
DEFAULT_MAJOR_COMPACTION_INTERVAL_MINS;
-  protected int maxInputFileSizeMB = DEFAULT_MAX_INPUT_FILE_SIZE_MB;
-  protected int maxInputFileCount = DEFAULT_MAX_INPUT_FILE_COUNT;
-  protected int minInputFileCount = DEFAULT_MIN_INPUT_FILE_COUNT;
+  protected int maxInputFileSizeMB = DEFAULT_INPUT_FILE_SIZE_MAX_MB;
+  protected int maxInputFileCount = DEFAULT_INPUT_FILE_COUNT_MAX;
+  protected int minInputFileCount = DEFAULT_INPUT_FILE_COUNT_MIN;
   protected int oldFileCleanupIntervalMins = 
DEFAULT_OLD_FILE_CLEANUP_INTERVAL_MINS;
   
   protected int batchSize = DEFAULT_BATCH_SIZE_MB;
@@ -78,16 +78,16 @@ public class HDFSStoreConfigHolder implements HDFSStore, 
HDFSStoreFactory ,Seria
     this.homeDir = config.getHomeDir();
     this.clientConfigFile = config.getHDFSClientConfigFile();
     this.blockCacheSize = config.getBlockCacheSize();
-    this.maxFileSize = config.getMaxWriteOnlyFileSize();
+    this.maxFileSize = config.getWriteOnlyFileSizeLimit();
     this.fileRolloverInterval = config.getWriteOnlyFileRolloverInterval();
     isAutoCompact = config.getMinorCompaction();
     maxConcurrency = config.getMinorCompactionThreads();
     autoMajorCompact = config.getMajorCompaction();
     majorCompactionConcurrency = config.getMajorCompactionThreads();
     majorCompactionIntervalMins = config.getMajorCompactionInterval();
-    maxInputFileSizeMB = config.getMaxInputFileSizeMB();
-    maxInputFileCount = config.getMaxInputFileCount();
-    minInputFileCount = config.getMinInputFileCount();
+    maxInputFileSizeMB = config.getInputFileSizeMax();
+    maxInputFileCount = config.getInputFileCountMax();
+    minInputFileCount = config.getInputFileCountMin();
     oldFileCleanupIntervalMins = config.getPurgeInterval();
     
     batchSize = config.getBatchSize();
@@ -133,9 +133,9 @@ public class HDFSStoreConfigHolder implements HDFSStore, 
HDFSStoreFactory ,Seria
       logAttrMutation("fileRolloverInterval", 
mutator.getWriteOnlyFileRolloverInterval());
       
setWriteOnlyFileRolloverInterval(mutator.getWriteOnlyFileRolloverInterval());
     }
-    if (mutator.getMaxWriteOnlyFileSize() >= 0) {
+    if (mutator.getWriteOnlyFileSizeLimit() >= 0) {
       logAttrMutation("MaxFileSize", 
mutator.getWriteOnlyFileRolloverInterval());
-      setMaxWriteOnlyFileSize(mutator.getMaxWriteOnlyFileSize());
+      setWriteOnlyFileSizeLimit(mutator.getWriteOnlyFileSizeLimit());
     }
     
     if (mutator.getMinorCompaction() != null) {
@@ -160,17 +160,17 @@ public class HDFSStoreConfigHolder implements HDFSStore, 
HDFSStoreFactory ,Seria
       logAttrMutation("AutoMajorCompaction", mutator.getMajorCompaction());
       setMajorCompaction(mutator.getMajorCompaction());
     }
-    if (mutator.getMaxInputFileCount() >= 0) {
-      logAttrMutation("maxInputFileCount", mutator.getMaxInputFileCount());
-      setMaxInputFileCount(mutator.getMaxInputFileCount());
+    if (mutator.getInputFileCountMax() >= 0) {
+      logAttrMutation("maxInputFileCount", mutator.getInputFileCountMax());
+      setInputFileCountMax(mutator.getInputFileCountMax());
     }
-    if (mutator.getMaxInputFileSizeMB() >= 0) {
-      logAttrMutation("MaxInputFileSizeMB", mutator.getMaxInputFileSizeMB());
-      setMaxInputFileSizeMB(mutator.getMaxInputFileSizeMB());
+    if (mutator.getInputFileSizeMax() >= 0) {
+      logAttrMutation("MaxInputFileSizeMB", mutator.getInputFileSizeMax());
+      setInputFileSizeMax(mutator.getInputFileSizeMax());
     }
-    if (mutator.getMinInputFileCount() >= 0) {
-      logAttrMutation("MinInputFileCount", mutator.getMinInputFileCount());
-      setMinInputFileCount(mutator.getMinInputFileCount());
+    if (mutator.getInputFileCountMin() >= 0) {
+      logAttrMutation("MinInputFileCount", mutator.getInputFileCountMin());
+      setInputFileCountMin(mutator.getInputFileCountMin());
     }    
     if (mutator.getPurgeInterval() >= 0) {
       logAttrMutation("OldFilesCleanupIntervalMins", 
mutator.getPurgeInterval());
@@ -248,13 +248,13 @@ public class HDFSStoreConfigHolder implements HDFSStore, 
HDFSStoreFactory ,Seria
   }
   
   @Override
-  public HDFSStoreFactory setMaxWriteOnlyFileSize(int maxFileSize) {
+  public HDFSStoreFactory setWriteOnlyFileSizeLimit(int maxFileSize) {
     assertIsPositive(CacheXml.HDFS_WRITE_ONLY_FILE_ROLLOVER_INTERVAL, 
maxFileSize);
     this.maxFileSize = maxFileSize;
     return this;
   }
   @Override
-  public int getMaxWriteOnlyFileSize() {
+  public int getWriteOnlyFileSizeLimit() {
     return maxFileSize;
   }
 
@@ -323,35 +323,35 @@ public class HDFSStoreConfigHolder implements HDFSStore, 
HDFSStoreFactory ,Seria
   }
   
   @Override
-  public HDFSStoreFactory setMaxInputFileSizeMB(int size) {
+  public HDFSStoreFactory setInputFileSizeMax(int size) {
     
HDFSStoreCreation.assertIsPositive("HDFS_COMPACTION_MAX_INPUT_FILE_SIZE_MB", 
size);
     this.maxInputFileSizeMB = size;
     return this;
   }
   @Override
-  public int getMaxInputFileSizeMB() {
+  public int getInputFileSizeMax() {
     return maxInputFileSizeMB;
   }
 
   @Override
-  public HDFSStoreFactory setMinInputFileCount(int count) {
+  public HDFSStoreFactory setInputFileCountMin(int count) {
     HDFSStoreCreation.assertIsPositive("HDFS_COMPACTION_MIN_INPUT_FILE_COUNT", 
count);
     this.minInputFileCount = count;
     return this;
   }
   @Override
-  public int getMinInputFileCount() {
+  public int getInputFileCountMin() {
     return minInputFileCount;
   }
 
   @Override
-  public HDFSStoreFactory setMaxInputFileCount(int count) {
+  public HDFSStoreFactory setInputFileCountMax(int count) {
     HDFSStoreCreation.assertIsPositive("HDFS_COMPACTION_MAX_INPUT_FILE_COUNT", 
count);
     this.maxInputFileCount = count;
     return this;
   }
   @Override
-  public int getMaxInputFileCount() {
+  public int getInputFileCountMax() {
     return maxInputFileCount;
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/5a90bf11/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSStoreCreation.java
----------------------------------------------------------------------
diff --git 
a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSStoreCreation.java
 
b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSStoreCreation.java
index e7121aa..1d4bed8 100644
--- 
a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSStoreCreation.java
+++ 
b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSStoreCreation.java
@@ -63,8 +63,8 @@ public class HDFSStoreCreation implements HDFSStoreFactory {
   }
   
   @Override
-  public HDFSStoreFactory setMaxWriteOnlyFileSize(int maxFileSize) {
-    configHolder.setMaxWriteOnlyFileSize(maxFileSize);
+  public HDFSStoreFactory setWriteOnlyFileSizeLimit(int maxFileSize) {
+    configHolder.setWriteOnlyFileSizeLimit(maxFileSize);
     return this;
   }
 
@@ -105,20 +105,20 @@ public class HDFSStoreCreation implements 
HDFSStoreFactory {
   }
 
   @Override
-  public HDFSStoreFactory setMaxInputFileSizeMB(int size) {
-    configHolder.setMaxInputFileSizeMB(size);
+  public HDFSStoreFactory setInputFileSizeMax(int size) {
+    configHolder.setInputFileSizeMax(size);
     return this;
   }
 
   @Override
-  public HDFSStoreFactory setMinInputFileCount(int count) {
-    configHolder.setMinInputFileCount(count);
+  public HDFSStoreFactory setInputFileCountMin(int count) {
+    configHolder.setInputFileCountMin(count);
     return this;
   }
 
   @Override
-  public HDFSStoreFactory setMaxInputFileCount(int count) {
-    configHolder.setMaxInputFileCount(count);
+  public HDFSStoreFactory setInputFileCountMax(int count) {
+    configHolder.setInputFileCountMax(count);
     return this;
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/5a90bf11/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSStoreImpl.java
----------------------------------------------------------------------
diff --git 
a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSStoreImpl.java
 
b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSStoreImpl.java
index 451ac79..0393118 100644
--- 
a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSStoreImpl.java
+++ 
b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSStoreImpl.java
@@ -494,8 +494,8 @@ public class HDFSStoreImpl implements HDFSStore {
   }
 
   @Override
-  public int getMaxWriteOnlyFileSize() {
-    return configHolder.getMaxWriteOnlyFileSize();
+  public int getWriteOnlyFileSizeLimit() {
+    return configHolder.getWriteOnlyFileSizeLimit();
   }
 
   @Override
@@ -530,18 +530,18 @@ public class HDFSStoreImpl implements HDFSStore {
 
 
   @Override
-  public int getMaxInputFileSizeMB() {
-    return configHolder.getMaxInputFileSizeMB();
+  public int getInputFileSizeMax() {
+    return configHolder.getInputFileSizeMax();
   }
 
   @Override
-  public int getMinInputFileCount() {
-    return configHolder.getMinInputFileCount();
+  public int getInputFileCountMin() {
+    return configHolder.getInputFileCountMin();
   }
 
   @Override
-  public int getMaxInputFileCount() {
-    return configHolder.getMaxInputFileCount();
+  public int getInputFileCountMax() {
+    return configHolder.getInputFileCountMax();
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/5a90bf11/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSStoreMutatorImpl.java
----------------------------------------------------------------------
diff --git 
a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSStoreMutatorImpl.java
 
b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSStoreMutatorImpl.java
index 46797c4..074da57 100644
--- 
a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSStoreMutatorImpl.java
+++ 
b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSStoreMutatorImpl.java
@@ -26,13 +26,13 @@ public class HDFSStoreMutatorImpl implements 
HDFSStoreMutator {
     configHolder = new HDFSStoreConfigHolder(store);
   }
   
-  public HDFSStoreMutator setMaxWriteOnlyFileSize(int maxFileSize) {
-    configHolder.setMaxWriteOnlyFileSize(maxFileSize);
+  public HDFSStoreMutator setWriteOnlyFileSizeLimit(int maxFileSize) {
+    configHolder.setWriteOnlyFileSizeLimit(maxFileSize);
     return this;
   }
   @Override
-  public int getMaxWriteOnlyFileSize() {
-    return configHolder.getMaxWriteOnlyFileSize();
+  public int getWriteOnlyFileSizeLimit() {
+    return configHolder.getWriteOnlyFileSizeLimit();
   }
 
   @Override
@@ -98,33 +98,33 @@ public class HDFSStoreMutatorImpl implements 
HDFSStoreMutator {
   }
 
   @Override
-  public HDFSStoreMutator setMaxInputFileSizeMB(int size) {
-    configHolder.setMaxInputFileSizeMB(size);
+  public HDFSStoreMutator setInputFileSizeMax(int size) {
+    configHolder.setInputFileSizeMax(size);
     return this;
   }
   @Override
-  public int getMaxInputFileSizeMB() {
-    return configHolder.getMaxInputFileSizeMB();
+  public int getInputFileSizeMax() {
+    return configHolder.getInputFileSizeMax();
   }
   
   @Override
-  public HDFSStoreMutator setMinInputFileCount(int count) {
-    configHolder.setMinInputFileCount(count);
+  public HDFSStoreMutator setInputFileCountMin(int count) {
+    configHolder.setInputFileCountMin(count);
     return this;
   }
   @Override
-  public int getMinInputFileCount() {
-    return configHolder.getMinInputFileCount();
+  public int getInputFileCountMin() {
+    return configHolder.getInputFileCountMin();
   }
   
   @Override
-  public HDFSStoreMutator setMaxInputFileCount(int count) {
-    configHolder.setMaxInputFileCount(count);
+  public HDFSStoreMutator setInputFileCountMax(int count) {
+    configHolder.setInputFileCountMax(count);
     return this;
   }
   @Override
-  public int getMaxInputFileCount() {
-    return configHolder.getMaxInputFileCount();
+  public int getInputFileCountMax() {
+    return configHolder.getInputFileCountMax();
   }
   
   @Override

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/5a90bf11/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSUnsortedHoplogOrganizer.java
----------------------------------------------------------------------
diff --git 
a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSUnsortedHoplogOrganizer.java
 
b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSUnsortedHoplogOrganizer.java
index 976482a..bc6bda0 100644
--- 
a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSUnsortedHoplogOrganizer.java
+++ 
b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSUnsortedHoplogOrganizer.java
@@ -159,7 +159,7 @@ public class HDFSUnsortedHoplogOrganizer extends 
AbstractHoplogOrganizer<Unsorte
         // append completed. If the file is to be rolled over, 
         // close writer and rename the file to a legitimate name.
         // Else, sync the already written data with HDFS nodes. 
-        int maxFileSize = this.store.getMaxWriteOnlyFileSize() * 1024 * 1024;  
+        int maxFileSize = this.store.getWriteOnlyFileSizeLimit() * 1024 * 
1024;  
         int fileRolloverInterval = 
this.store.getWriteOnlyFileRolloverInterval(); 
         if (writer.getCurrentSize() >= maxFileSize || 
             timeSinceLastFlush >= fileRolloverInterval) {
@@ -196,7 +196,7 @@ public class HDFSUnsortedHoplogOrganizer extends 
AbstractHoplogOrganizer<Unsorte
       if (writerSize < (minsizeforrollover * 1024L))
         return;
       
-      int maxFileSize = this.store.getMaxWriteOnlyFileSize() * 1024 * 1024;  
+      int maxFileSize = this.store.getWriteOnlyFileSizeLimit() * 1024 * 1024;  
       int fileRolloverInterval = 
this.store.getWriteOnlyFileRolloverInterval(); 
       if (writerSize >= maxFileSize || 
           timeSinceLastFlush >= fileRolloverInterval || forceClose) {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/5a90bf11/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HdfsSortedOplogOrganizer.java
----------------------------------------------------------------------
diff --git 
a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HdfsSortedOplogOrganizer.java
 
b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HdfsSortedOplogOrganizer.java
index 2a8eed6..0b96557 100644
--- 
a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HdfsSortedOplogOrganizer.java
+++ 
b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HdfsSortedOplogOrganizer.java
@@ -1355,9 +1355,9 @@ public class HdfsSortedOplogOrganizer extends 
AbstractHoplogOrganizer<SortedHopl
       // minimum number of files that must be present for compaction to be 
worth
       final int MIN_FILE_COUNT_COMPACTION;
       
-      MAX_COMPACTION_FILE_SIZE = ((long)store.getMaxInputFileSizeMB()) * 1024 
*1024;
-      MAX_FILE_COUNT_COMPACTION = store.getMaxInputFileCount();
-      MIN_FILE_COUNT_COMPACTION = store.getMinInputFileCount();
+      MAX_COMPACTION_FILE_SIZE = ((long)store.getInputFileSizeMax()) * 1024 
*1024;
+      MAX_FILE_COUNT_COMPACTION = store.getInputFileCountMax();
+      MIN_FILE_COUNT_COMPACTION = store.getInputFileCountMin();
 
       try {
         // skip till first file smaller than the max compaction file size is

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/5a90bf11/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheXmlParser.java
----------------------------------------------------------------------
diff --git 
a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheXmlParser.java
 
b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheXmlParser.java
index 3ec5b92..7dd949a 100644
--- 
a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheXmlParser.java
+++ 
b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheXmlParser.java
@@ -1124,7 +1124,7 @@ public class CacheXmlParser extends CacheXml implements 
ContentHandler {
     
     String maxFileSize = atts.getValue(HDFS_MAX_WRITE_ONLY_FILE_SIZE);
     if (maxFileSize != null) {
-      attrs.setMaxWriteOnlyFileSize(parseInt(maxFileSize));
+      attrs.setWriteOnlyFileSizeLimit(parseInt(maxFileSize));
     }
     
     String fileRolloverInterval = 
atts.getValue(HDFS_WRITE_ONLY_FILE_ROLLOVER_INTERVAL);

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/5a90bf11/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/commands/HDFSStoreCommands.java
----------------------------------------------------------------------
diff --git 
a/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/commands/HDFSStoreCommands.java
 
b/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/commands/HDFSStoreCommands.java
index a2f4138..dc3c0c3 100644
--- 
a/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/commands/HDFSStoreCommands.java
+++ 
b/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/commands/HDFSStoreCommands.java
@@ -211,7 +211,7 @@ public class HDFSStoreCommands   extends 
AbstractCommandsSupport {
     if (homeDir != null)
       configHolder.setHomeDir(homeDir);
     if (maxWriteonlyFileSize != null)
-      configHolder.setMaxWriteOnlyFileSize(maxWriteonlyFileSize);
+      configHolder.setWriteOnlyFileSizeLimit(maxWriteonlyFileSize);
     if (namenode != null)
       configHolder.setNameNodeURL(namenode);
 
@@ -356,7 +356,7 @@ public class HDFSStoreCommands   extends 
AbstractCommandsSupport {
     hdfsStoreSection.addData("Home Dir", storePrms.getHomeDir());
     hdfsStoreSection.addData("Block Cache", storePrms.getBlockCacheSize());
     hdfsStoreSection.addData("File RollOver Interval", 
storePrms.getWriteOnlyFileRolloverInterval());
-    hdfsStoreSection.addData("Max WriteOnly File Size", 
storePrms.getMaxWriteOnlyFileSize());
+    hdfsStoreSection.addData("Max WriteOnly File Size", 
storePrms.getWriteOnlyFileSizeLimit());
 
     hdfsStoreSection.addData("Client Configuration File", 
storePrms.getHDFSClientConfigFile());
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/5a90bf11/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/AlterHDFSStoreFunction.java
----------------------------------------------------------------------
diff --git 
a/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/AlterHDFSStoreFunction.java
 
b/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/AlterHDFSStoreFunction.java
index adec764..6b5f66c 100644
--- 
a/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/AlterHDFSStoreFunction.java
+++ 
b/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/AlterHDFSStoreFunction.java
@@ -109,7 +109,7 @@ public class AlterHDFSStoreFunction extends FunctionAdapter 
implements InternalE
                                        .getFileRolloverInterval());
 
                if (alterAttributes.getMaxWriteonlyFileSize() != null)
-                       
storeMutator.setMaxWriteOnlyFileSize(alterAttributes.getMaxWriteonlyFileSize());
+                       
storeMutator.setWriteOnlyFileSizeLimit(alterAttributes.getMaxWriteonlyFileSize());
 
                if (alterAttributes.getMinorCompact() != null)
                        
storeMutator.setMinorCompaction(alterAttributes.getMinorCompact());

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/5a90bf11/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/CreateHDFSStoreFunction.java
----------------------------------------------------------------------
diff --git 
a/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/CreateHDFSStoreFunction.java
 
b/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/CreateHDFSStoreFunction.java
index 460feba..e079a27 100644
--- 
a/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/CreateHDFSStoreFunction.java
+++ 
b/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/CreateHDFSStoreFunction.java
@@ -91,7 +91,7 @@ public class CreateHDFSStoreFunction extends FunctionAdapter 
implements Internal
     
hdfsStoreFactory.setWriteOnlyFileRolloverInterval(configHolder.getWriteOnlyFileRolloverInterval());
     hdfsStoreFactory.setHomeDir(configHolder.getHomeDir());
     
hdfsStoreFactory.setHDFSClientConfigFile(configHolder.getHDFSClientConfigFile());
-    
hdfsStoreFactory.setMaxWriteOnlyFileSize(configHolder.getMaxWriteOnlyFileSize());
+    
hdfsStoreFactory.setWriteOnlyFileSizeLimit(configHolder.getWriteOnlyFileSizeLimit());
     hdfsStoreFactory.setMajorCompaction(configHolder.getMajorCompaction());
     
hdfsStoreFactory.setMajorCompactionInterval(configHolder.getMajorCompactionInterval());
     
hdfsStoreFactory.setMajorCompactionThreads(configHolder.getMajorCompactionThreads());

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/5a90bf11/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSConfigJUnitTest.java
----------------------------------------------------------------------
diff --git 
a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSConfigJUnitTest.java
 
b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSConfigJUnitTest.java
index 0756e27..f127afa 100644
--- 
a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSConfigJUnitTest.java
+++ 
b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSConfigJUnitTest.java
@@ -81,7 +81,7 @@ public class HDFSConfigJUnitTest extends TestCase {
         assertEquals(false, r1.getAttributes().getHDFSWriteOnly());
         assertTrue("Mismatch in attributes, actual.getDiskStoreName: " + 
store.getDiskStoreName() + " and expected getDiskStoreName: null", 
store.getDiskStoreName()== null);
         assertTrue("Mismatch in attributes, actual.getFileRolloverInterval: " 
+ store.getWriteOnlyFileRolloverInterval() + " and expected 
getFileRolloverInterval: 3600", store.getWriteOnlyFileRolloverInterval() == 
3600);
-        assertTrue("Mismatch in attributes, actual.getMaxFileSize: " + 
store.getMaxWriteOnlyFileSize() + " and expected getMaxFileSize: 256MB", 
store.getMaxWriteOnlyFileSize() == 256);
+        assertTrue("Mismatch in attributes, actual.getMaxFileSize: " + 
store.getWriteOnlyFileSizeLimit() + " and expected getMaxFileSize: 256MB", 
store.getWriteOnlyFileSizeLimit() == 256);
         this.c.close();
         
         
@@ -121,7 +121,7 @@ public class HDFSConfigJUnitTest extends TestCase {
         hsf.setSynchronousDiskWrite(false);
         hsf.setHomeDir("/home/hemant");
         hsf.setNameNodeURL("mymachine");
-        hsf.setMaxWriteOnlyFileSize(1);
+        hsf.setWriteOnlyFileSizeLimit(1);
         hsf.setWriteOnlyFileRolloverInterval(10);
         hsf.create("myHDFSStore");
         
@@ -142,7 +142,7 @@ public class HDFSConfigJUnitTest extends TestCase {
         assertTrue("Mismatch in attributes, actual.batchInterval: " + 
store.getBatchInterval() + " and expected batchsize: 50 ", 
store.getBatchSize()== 50);
         assertTrue("Mismatch in attributes, actual.isDiskSynchronous: " + 
store.getSynchronousDiskWrite() + " and expected isPersistent: false", 
store.getSynchronousDiskWrite()== false);
         assertTrue("Mismatch in attributes, actual.getFileRolloverInterval: " 
+ store.getWriteOnlyFileRolloverInterval() + " and expected 
getFileRolloverInterval: 10", store.getWriteOnlyFileRolloverInterval() == 10);
-        assertTrue("Mismatch in attributes, actual.getMaxFileSize: " + 
store.getMaxWriteOnlyFileSize() + " and expected getMaxFileSize: 1MB", 
store.getMaxWriteOnlyFileSize() == 1);
+        assertTrue("Mismatch in attributes, actual.getMaxFileSize: " + 
store.getWriteOnlyFileSizeLimit() + " and expected getMaxFileSize: 1MB", 
store.getWriteOnlyFileSizeLimit() == 1);
         this.c.close();
       } finally {
         this.c.close();
@@ -187,7 +187,7 @@ public class HDFSConfigJUnitTest extends TestCase {
         assertEquals(false, r1.getAttributes().getHDFSWriteOnly());
         assertTrue("Mismatch in attributes, actual.getDiskStoreName: " + 
store.getDiskStoreName() + " and expected getDiskStoreName: null", 
store.getDiskStoreName()== null);
         assertTrue("Mismatch in attributes, actual.getFileRolloverInterval: " 
+ store.getWriteOnlyFileRolloverInterval() + " and expected 
getFileRolloverInterval: 3600", store.getWriteOnlyFileRolloverInterval() == 
3600);
-        assertTrue("Mismatch in attributes, actual.getMaxFileSize: " + 
store.getMaxWriteOnlyFileSize() + " and expected getMaxFileSize: 256MB", 
store.getMaxWriteOnlyFileSize() == 256);
+        assertTrue("Mismatch in attributes, actual.getMaxFileSize: " + 
store.getWriteOnlyFileSizeLimit() + " and expected getMaxFileSize: 256MB", 
store.getWriteOnlyFileSizeLimit() == 256);
         
         this.c.close();
         
@@ -265,7 +265,7 @@ public class HDFSConfigJUnitTest extends TestCase {
         assertTrue("Mismatch in attributes, actual.batchInterval: " + 
store.getBatchInterval() + " and expected batchsize: 50", 
store.getBatchInterval()== 50);
         assertTrue("Mismatch in attributes, actual.isDiskSynchronous: " + 
store.getSynchronousDiskWrite() + " and expected isDiskSynchronous: false", 
store.getSynchronousDiskWrite()== false);
         assertTrue("Mismatch in attributes, actual.getFileRolloverInterval: " 
+ store.getWriteOnlyFileRolloverInterval() + " and expected 
getFileRolloverInterval: 10", store.getWriteOnlyFileRolloverInterval() == 10);
-        assertTrue("Mismatch in attributes, actual.getMaxFileSize: " + 
store.getMaxWriteOnlyFileSize() + " and expected getMaxFileSize: 1MB", 
store.getMaxWriteOnlyFileSize() == 1);
+        assertTrue("Mismatch in attributes, actual.getMaxFileSize: " + 
store.getWriteOnlyFileSizeLimit() + " and expected getMaxFileSize: 1MB", 
store.getWriteOnlyFileSizeLimit() == 1);
         
         this.c.close();
       } finally {
@@ -303,9 +303,9 @@ public class HDFSConfigJUnitTest extends TestCase {
     
     assertTrue("compaction auto-compact mismatch.", 
store.getMinorCompaction());
     assertTrue("compaction auto-major-compact mismatch.", 
store.getMajorCompaction());
-    assertEquals("compaction max-input-file-size mismatch.", 512, 
store.getMaxInputFileSizeMB());
-    assertEquals("compaction min-input-file-count.", 4, 
store.getMinInputFileCount());
-    assertEquals("compaction max-iteration-size.", 10, 
store.getMaxInputFileCount());
+    assertEquals("compaction max-input-file-size mismatch.", 512, 
store.getInputFileSizeMax());
+    assertEquals("compaction min-input-file-count.", 4, 
store.getInputFileCountMin());
+    assertEquals("compaction max-iteration-size.", 10, 
store.getInputFileCountMax());
     assertEquals("compaction max-concurrency", 10, 
store.getMinorCompactionThreads());
     assertEquals("compaction max-major-concurrency", 2, 
store.getMajorCompactionThreads());
     assertEquals("compaction major-interval", 720, 
store.getMajorCompactionInterval());
@@ -339,19 +339,19 @@ public class HDFSConfigJUnitTest extends TestCase {
     hsf = this.c.createHDFSStoreFactory();
     
     try {
-      hsf.setMaxInputFileSizeMB(-1);
+      hsf.setInputFileSizeMax(-1);
       fail("validation failed");
     } catch (IllegalArgumentException e) {
       //expected
     }
     try {
-      hsf.setMinInputFileCount(-1);
+      hsf.setInputFileCountMin(-1);
       fail("validation failed");
     } catch (IllegalArgumentException e) {
       //expected
     }
     try {
-      hsf.setMaxInputFileCount(-1);
+      hsf.setInputFileCountMax(-1);
       //expected
       fail("validation failed");
     } catch (IllegalArgumentException e) {
@@ -381,16 +381,16 @@ public class HDFSConfigJUnitTest extends TestCase {
       //expected
     }
     try {
-      hsf.setMinInputFileCount(2);
-      hsf.setMaxInputFileCount(1);
+      hsf.setInputFileCountMin(2);
+      hsf.setInputFileCountMax(1);
       hsf.create("test");
       fail("validation failed");
     } catch (IllegalArgumentException e) {
       //expected
     }
     try {
-      hsf.setMaxInputFileCount(1);
-      hsf.setMinInputFileCount(2);
+      hsf.setInputFileCountMax(1);
+      hsf.setInputFileCountMin(2);
       hsf.create("test");
       fail("validation failed");
     } catch (IllegalArgumentException e) {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/5a90bf11/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HdfsStoreMutatorJUnitTest.java
----------------------------------------------------------------------
diff --git 
a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HdfsStoreMutatorJUnitTest.java
 
b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HdfsStoreMutatorJUnitTest.java
index f936b43..1b50345 100644
--- 
a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HdfsStoreMutatorJUnitTest.java
+++ 
b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HdfsStoreMutatorJUnitTest.java
@@ -19,11 +19,11 @@ public class HdfsStoreMutatorJUnitTest extends 
BaseHoplogTestCase {
   public void testMutatorInitialState() {
     HDFSStoreMutator mutator = hdfsStore.createHdfsStoreMutator();
     assertEquals(-1, mutator.getWriteOnlyFileRolloverInterval());
-    assertEquals(-1, mutator.getMaxWriteOnlyFileSize());
+    assertEquals(-1, mutator.getWriteOnlyFileSizeLimit());
     
-    assertEquals(-1, mutator.getMaxInputFileCount());
-    assertEquals(-1, mutator.getMaxInputFileSizeMB());
-    assertEquals(-1, mutator.getMinInputFileCount());
+    assertEquals(-1, mutator.getInputFileCountMax());
+    assertEquals(-1, mutator.getInputFileSizeMax());
+    assertEquals(-1, mutator.getInputFileCountMin());
     assertEquals(-1, mutator.getMinorCompactionThreads());
     assertNull(mutator.getMinorCompaction());
     
@@ -47,26 +47,26 @@ public class HdfsStoreMutatorJUnitTest extends 
BaseHoplogTestCase {
       // expected
     }
     try {
-      mutator.setMaxWriteOnlyFileSize(-5);
+      mutator.setWriteOnlyFileSizeLimit(-5);
       fail();
     } catch (IllegalArgumentException e) {
       // expected
     }
     
     try {
-      mutator.setMinInputFileCount(-1);
+      mutator.setInputFileCountMin(-1);
       fail();
     } catch (IllegalArgumentException e) {
       // expected
     }
     try {
-      mutator.setMaxInputFileCount(-1);
+      mutator.setInputFileCountMax(-1);
       fail();
     } catch (IllegalArgumentException e) {
       // expected
     }
     try {
-      mutator.setMaxInputFileSizeMB(-1);
+      mutator.setInputFileSizeMax(-1);
       fail();
     } catch (IllegalArgumentException e) {
       // expected
@@ -109,8 +109,8 @@ public class HdfsStoreMutatorJUnitTest extends 
BaseHoplogTestCase {
     }
 */    
     try {
-      mutator.setMinInputFileCount(10);
-      mutator.setMaxInputFileCount(5);
+      mutator.setInputFileCountMin(10);
+      mutator.setInputFileCountMax(5);
       hdfsStore.alter(mutator);
       fail();
     } catch (IllegalArgumentException e) {
@@ -122,11 +122,11 @@ public class HdfsStoreMutatorJUnitTest extends 
BaseHoplogTestCase {
     HDFSStoreMutator mutator = hdfsStore.createHdfsStoreMutator();
     
     mutator.setWriteOnlyFileRolloverInterval(121);
-    mutator.setMaxWriteOnlyFileSize(234);
+    mutator.setWriteOnlyFileSizeLimit(234);
     
-    mutator.setMaxInputFileCount(87);
-    mutator.setMaxInputFileSizeMB(45);
-    mutator.setMinInputFileCount(34);
+    mutator.setInputFileCountMax(87);
+    mutator.setInputFileSizeMax(45);
+    mutator.setInputFileCountMin(34);
     mutator.setMinorCompactionThreads(843);
     mutator.setMinorCompaction(false);
 
@@ -140,11 +140,11 @@ public class HdfsStoreMutatorJUnitTest extends 
BaseHoplogTestCase {
     mutator.setBatchInterval(695);
     
     assertEquals(121, mutator.getWriteOnlyFileRolloverInterval());
-    assertEquals(234, mutator.getMaxWriteOnlyFileSize());
+    assertEquals(234, mutator.getWriteOnlyFileSizeLimit());
     
-    assertEquals(87, mutator.getMaxInputFileCount());
-    assertEquals(45, mutator.getMaxInputFileSizeMB());
-    assertEquals(34, mutator.getMinInputFileCount());
+    assertEquals(87, mutator.getInputFileCountMax());
+    assertEquals(45, mutator.getInputFileSizeMax());
+    assertEquals(34, mutator.getInputFileCountMin());
     assertEquals(843, mutator.getMinorCompactionThreads());
     assertFalse(mutator.getMinorCompaction());
     
@@ -159,11 +159,11 @@ public class HdfsStoreMutatorJUnitTest extends 
BaseHoplogTestCase {
     
     // repeat the cycle once more
     mutator.setWriteOnlyFileRolloverInterval(14);
-    mutator.setMaxWriteOnlyFileSize(56);
+    mutator.setWriteOnlyFileSizeLimit(56);
     
-    mutator.setMaxInputFileCount(93);
-    mutator.setMaxInputFileSizeMB(85);
-    mutator.setMinInputFileCount(64);
+    mutator.setInputFileCountMax(93);
+    mutator.setInputFileSizeMax(85);
+    mutator.setInputFileCountMin(64);
     mutator.setMinorCompactionThreads(59);
     mutator.setMinorCompaction(true);
     
@@ -174,11 +174,11 @@ public class HdfsStoreMutatorJUnitTest extends 
BaseHoplogTestCase {
     mutator.setPurgeInterval(328);
     
     assertEquals(14, mutator.getWriteOnlyFileRolloverInterval());
-    assertEquals(56, mutator.getMaxWriteOnlyFileSize());
+    assertEquals(56, mutator.getWriteOnlyFileSizeLimit());
     
-    assertEquals(93, mutator.getMaxInputFileCount());
-    assertEquals(85, mutator.getMaxInputFileSizeMB());
-    assertEquals(64, mutator.getMinInputFileCount());
+    assertEquals(93, mutator.getInputFileCountMax());
+    assertEquals(85, mutator.getInputFileSizeMax());
+    assertEquals(64, mutator.getInputFileCountMin());
     assertEquals(59, mutator.getMinorCompactionThreads());
     assertTrue(mutator.getMinorCompaction());
     

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/5a90bf11/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/TieredCompactionJUnitTest.java
----------------------------------------------------------------------
diff --git 
a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/TieredCompactionJUnitTest.java
 
b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/TieredCompactionJUnitTest.java
index 5bd258e..7b45952 100644
--- 
a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/TieredCompactionJUnitTest.java
+++ 
b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/TieredCompactionJUnitTest.java
@@ -48,7 +48,7 @@ public class TieredCompactionJUnitTest extends 
BaseHoplogTestCase {
   protected void configureHdfsStoreFactory() throws Exception {
     super.configureHdfsStoreFactory();
     
-    hsf.setMinInputFileCount(3);
+    hsf.setInputFileCountMin(3);
     hsf.setMinorCompaction(false);
     hsf.setMajorCompaction(false);
   }
@@ -346,7 +346,7 @@ public class TieredCompactionJUnitTest extends 
BaseHoplogTestCase {
     TestHoplog thirdHop = (TestHoplog) targets.get(2).get();
 
     // oldest is more than max size is ignored 
-    oldestHop.get().size = HDFSStore.DEFAULT_MAX_INPUT_FILE_SIZE_MB * ONE_MB + 
100;
+    oldestHop.get().size = HDFSStore.DEFAULT_INPUT_FILE_SIZE_MAX_MB * ONE_MB + 
100;
     List<TrackedReference<Hoplog>> list = (List<TrackedReference<Hoplog>>) 
targets.clone();
     compactor.getMinorCompactionTargets(list, -1);
     assertEquals(4, list.size());
@@ -355,7 +355,7 @@ public class TieredCompactionJUnitTest extends 
BaseHoplogTestCase {
     }
     
     // third is more than max size but is not ignored
-    thirdHop.size = HDFSStore.DEFAULT_MAX_INPUT_FILE_SIZE_MB * ONE_MB + 100;
+    thirdHop.size = HDFSStore.DEFAULT_INPUT_FILE_SIZE_MAX_MB * ONE_MB + 100;
     oldestHop.increment();
     list = (List<TrackedReference<Hoplog>>) targets.clone();
     compactor.getMinorCompactionTargets(list, -1);
@@ -365,7 +365,7 @@ public class TieredCompactionJUnitTest extends BaseHoplogTestCase {
      if (i != 2) {
        assertTrue(((TestHoplog) ref.get()).size - TEN_MB < 5);
      } else {
-        assertTrue(((TestHoplog) ref.get()).size > HDFSStore.DEFAULT_MAX_INPUT_FILE_SIZE_MB * ONE_MB);
+        assertTrue(((TestHoplog) ref.get()).size > HDFSStore.DEFAULT_INPUT_FILE_SIZE_MAX_MB * ONE_MB);
       }
       i++;
     }
@@ -375,7 +375,7 @@ public class TieredCompactionJUnitTest extends BaseHoplogTestCase {
    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
    HoplogCompactor compactor = (HoplogCompactor) organizer.getCompactor();

-    assertTrue(TEN_MB * 2 < hdfsStore.getMaxInputFileSizeMB() * ONE_MB);
+    assertTrue(TEN_MB * 2 < hdfsStore.getInputFileSizeMax() * ONE_MB);
    
    ArrayList<TrackedReference<TestHoplog>> targets = new ArrayList<TrackedReference<TestHoplog>>();
     for (int i = 0; i < 5; i++) {
@@ -389,7 +389,7 @@ public class TieredCompactionJUnitTest extends BaseHoplogTestCase {
     assertEquals(targets.size(), list.size());
     
     HDFSStoreMutator mutator = hdfsStore.createHdfsStoreMutator();
-    mutator.setMaxInputFileSizeMB(1);
+    mutator.setInputFileSizeMax(1);
     hdfsStore.alter(mutator);
     
     compactor.getMinorCompactionTargets(list, -1);
@@ -400,7 +400,7 @@ public class TieredCompactionJUnitTest extends BaseHoplogTestCase {
    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
    HoplogCompactor compactor = (HoplogCompactor) organizer.getCompactor();
    
-    assertTrue(2 < hdfsStore.getMaxInputFileCount());
+    assertTrue(2 < hdfsStore.getInputFileCountMax());
    
    ArrayList<TrackedReference<TestHoplog>> targets = new ArrayList<TrackedReference<TestHoplog>>();
     for (int i = 0; i < 5; i++) {
@@ -414,8 +414,8 @@ public class TieredCompactionJUnitTest extends BaseHoplogTestCase {
     assertEquals(targets.size(), list.size());
     
     HDFSStoreMutator mutator = hdfsStore.createHdfsStoreMutator();
-    mutator.setMaxInputFileCount(2);
-    mutator.setMinInputFileCount(2);
+    mutator.setInputFileCountMax(2);
+    mutator.setInputFileCountMin(2);
     hdfsStore.alter(mutator);
     
     compactor.getMinorCompactionTargets(list, -1);

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/5a90bf11/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/HoplogUtilJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/HoplogUtilJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/HoplogUtilJUnitTest.java
index 0cf5668..a209b6e 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/HoplogUtilJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/HoplogUtilJUnitTest.java
@@ -37,7 +37,7 @@ public class HoplogUtilJUnitTest extends BaseHoplogTestCase {
   protected void configureHdfsStoreFactory() throws Exception {
     super.configureHdfsStoreFactory();
     
-    hsf.setMinInputFileCount(3);
+    hsf.setInputFileCountMin(3);
     hsf.setMinorCompaction(false);
     hsf.setMajorCompaction(false);
   }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/5a90bf11/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/HDFSStoreCommandsJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/HDFSStoreCommandsJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/HDFSStoreCommandsJUnitTest.java
index 52d7073..6594311 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/HDFSStoreCommandsJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/HDFSStoreCommandsJUnitTest.java
@@ -742,7 +742,7 @@ public class HDFSStoreCommandsJUnitTest {
         will(returnValue(namenode));
         allowing(mockStore).getHomeDir();
         will(returnValue(homeDir));
-        allowing(mockStore).getMaxWriteOnlyFileSize();
+        allowing(mockStore).getWriteOnlyFileSizeLimit();
         will(returnValue(maxFileSize));
         allowing(mockStore).getWriteOnlyFileRolloverInterval();
         will(returnValue(fileRolloverInterval));
@@ -758,11 +758,11 @@ public class HDFSStoreCommandsJUnitTest {
         will(returnValue(minorCompactionThreads));
         allowing(mockStore).getPurgeInterval();
         will(returnValue(purgeInterval));
-        allowing(mockStore).getMaxInputFileCount();
+        allowing(mockStore).getInputFileCountMax();
         will(returnValue(10));
-        allowing(mockStore).getMaxInputFileSizeMB();
+        allowing(mockStore).getInputFileSizeMax();
         will(returnValue(1024));
-        allowing(mockStore).getMinInputFileCount();
+        allowing(mockStore).getInputFileCountMin();
         will(returnValue(2));
         allowing(mockStore).getBlockCacheSize();
         will(returnValue(blockCachesize));

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/5a90bf11/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/functions/DescribeHDFSStoreFunctionJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/functions/DescribeHDFSStoreFunctionJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/functions/DescribeHDFSStoreFunctionJUnitTest.java
index 2d2378f..8d7b953 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/functions/DescribeHDFSStoreFunctionJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/functions/DescribeHDFSStoreFunctionJUnitTest.java
@@ -121,7 +121,7 @@ public class DescribeHDFSStoreFunctionJUnitTest {
     assertEquals(hdfsStoreName, hdfsStoreDetails.getName());
     assertEquals("hdfs://localhost:9000", hdfsStoreDetails.getNameNodeURL());
     assertEquals("testDir", hdfsStoreDetails.getHomeDir());
-    assertEquals(1024, hdfsStoreDetails.getMaxWriteOnlyFileSize());
+    assertEquals(1024, hdfsStoreDetails.getWriteOnlyFileSizeLimit());
     assertEquals(20, hdfsStoreDetails.getWriteOnlyFileRolloverInterval());
     assertFalse(hdfsStoreDetails.getMinorCompaction());
     assertEquals("0.25", Float.toString(hdfsStoreDetails.getBlockCacheSize()));
@@ -267,11 +267,11 @@ public class DescribeHDFSStoreFunctionJUnitTest {
         will(returnValue(minorCompactionThreads));
         oneOf(mockHdfsStore).getPurgeInterval();
         will(returnValue(purgeInterval));
-        oneOf(mockHdfsStore).getMaxInputFileCount();
+        oneOf(mockHdfsStore).getInputFileCountMax();
         will(returnValue(10));
-        oneOf(mockHdfsStore).getMaxInputFileSizeMB();
+        oneOf(mockHdfsStore).getInputFileSizeMax();
         will(returnValue(1024));
-        oneOf(mockHdfsStore).getMinInputFileCount();
+        oneOf(mockHdfsStore).getInputFileCountMin();
         will(returnValue(2));
         oneOf(mockHdfsStore).getBatchSize();
         will(returnValue(batchSize));
@@ -293,7 +293,7 @@ public class DescribeHDFSStoreFunctionJUnitTest {
         will(returnValue(namenode));
         oneOf(mockHdfsStore).getHomeDir();
         will(returnValue(homeDir));
-        oneOf(mockHdfsStore).getMaxWriteOnlyFileSize();
+        oneOf(mockHdfsStore).getWriteOnlyFileSizeLimit();
         will(returnValue(maxFileSize));
         oneOf(mockHdfsStore).getWriteOnlyFileRolloverInterval();
         will(returnValue(fileRolloverInterval));

Reply via email to