ignite-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From sboi...@apache.org
Subject [16/38] incubator-ignite git commit: # Renaming
Date Fri, 05 Dec 2014 08:44:43 GMT
# Renaming


Project: http://git-wip-us.apache.org/repos/asf/incubator-ignite/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-ignite/commit/cd01ed99
Tree: http://git-wip-us.apache.org/repos/asf/incubator-ignite/tree/cd01ed99
Diff: http://git-wip-us.apache.org/repos/asf/incubator-ignite/diff/cd01ed99

Branch: refs/heads/master
Commit: cd01ed994cbc24dbe143a8517d768dae87c8a312
Parents: f3d9376
Author: sboikov <sboikov@gridgain.com>
Authored: Fri Dec 5 11:16:11 2014 +0300
Committer: sboikov <sboikov@gridgain.com>
Committed: Fri Dec 5 11:16:11 2014 +0300

----------------------------------------------------------------------
 examples/config/filesystem/example-ggfs.xml     |   2 +-
 .../org/gridgain/examples/ggfs/GgfsExample.java |   4 +-
 .../src/main/java/org/apache/ignite/Ignite.java |  13 +-
 .../main/java/org/apache/ignite/IgniteFs.java   |   6 +-
 .../configuration/IgniteConfiguration.java      |   6 +-
 ...GridGgfsConcurrentModificationException.java |  28 -
 .../grid/ggfs/GridGgfsConfiguration.java        | 801 -------------------
 .../ggfs/GridGgfsCorruptedFileException.java    |   2 +-
 .../gridgain/grid/ggfs/GridGgfsException.java   |  49 --
 .../GridGgfsInvalidHdfsVersionException.java    |   2 +-
 .../grid/ggfs/GridGgfsInvalidPathException.java |   2 +-
 .../org/gridgain/grid/ggfs/GridGgfsMetrics.java |   2 +-
 .../org/gridgain/grid/ggfs/GridGgfsMode.java    |   8 +-
 .../grid/ggfs/GridGgfsOutOfSpaceException.java  |   2 +-
 ...IgniteFsConcurrentModificationException.java |  28 +
 .../grid/ggfs/IgniteFsConfiguration.java        | 801 +++++++++++++++++++
 .../gridgain/grid/ggfs/IgniteFsException.java   |  49 ++
 .../grid/ggfs/mapreduce/GridGgfsTask.java       |   2 +-
 .../grid/ggfs/mapreduce/GridGgfsTaskArgs.java   |   2 +-
 .../org/gridgain/grid/kernal/GridGainEx.java    |   6 +-
 .../ggfs/common/GridGgfsControlResponse.java    |   4 +-
 .../processors/cache/GridCacheAdapter.java      |   4 +-
 .../processors/cache/GridCacheProcessor.java    |   4 +-
 .../kernal/processors/cache/GridCacheUtils.java |   4 +-
 .../processors/ggfs/GridGgfsAsyncImpl.java      |   2 +-
 .../kernal/processors/ggfs/GridGgfsContext.java |   6 +-
 .../processors/ggfs/GridGgfsDataManager.java    |   6 +-
 .../GridGgfsDirectoryNotEmptyException.java     |   2 +-
 .../processors/ggfs/GridGgfsFileInfo.java       |   4 +-
 .../kernal/processors/ggfs/GridGgfsImpl.java    |  10 +-
 .../processors/ggfs/GridGgfsMetaManager.java    |  38 +-
 .../ggfs/GridGgfsOutputStreamImpl.java          |   2 +-
 .../processors/ggfs/GridGgfsProcessor.java      |  12 +-
 .../processors/ggfs/GridGgfsServerManager.java  |   4 +-
 .../visor/node/VisorGgfsConfiguration.java      |   6 +-
 .../grid/kernal/visor/util/VisorTaskUtils.java  |   2 +-
 modules/core/src/test/config/ggfs-loopback.xml  |   2 +-
 .../core/src/test/config/ggfs-no-endpoint.xml   |   2 +-
 modules/core/src/test/config/ggfs-shmem.xml     |   2 +-
 .../ggfs/GridGgfsEventsAbstractSelfTest.java    |   6 +-
 .../GridGgfsFragmentizerAbstractSelfTest.java   |   2 +-
 ...heGgfsPerBlockLruEvictionPolicySelfTest.java |   4 +-
 .../ggfs/GridGgfsAbstractSelfTest.java          |   6 +-
 .../processors/ggfs/GridGgfsCacheSelfTest.java  |   2 +-
 .../ggfs/GridGgfsDataManagerSelfTest.java       |   8 +-
 .../ggfs/GridGgfsMetaManagerSelfTest.java       |   6 +-
 .../ggfs/GridGgfsMetricsSelfTest.java           |   4 +-
 .../processors/ggfs/GridGgfsModesSelfTest.java  |   4 +-
 .../ggfs/GridGgfsProcessorSelfTest.java         |  24 +-
 .../GridGgfsProcessorValidationSelfTest.java    |  32 +-
 ...IpcEndpointRegistrationAbstractSelfTest.java |   6 +-
 ...dpointRegistrationOnLinuxAndMacSelfTest.java |   2 +-
 .../processors/ggfs/GridGgfsSizeSelfTest.java   |   2 +-
 .../ggfs/GridGgfsStreamsSelfTest.java           |   2 +-
 .../processors/ggfs/GridGgfsTaskSelfTest.java   |   2 +-
 .../GridGgfsAbstractRecordResolverSelfTest.java |   2 +-
 .../shmem/GridIpcSharedMemoryNodeStartup.java   |   2 +-
 .../ggfs/hadoop/GridGgfsHadoopParameters.java   |   4 +-
 .../hadoop/v1/GridGgfsHadoopFileSystem.java     |   8 +-
 .../hadoop/v2/GridGgfsHadoopFileSystem.java     |   2 +-
 .../ggfs/hadoop/GridGgfsHadoopEndpoint.java     |   2 +-
 .../hadoop/GridGgfsHadoopFileSystemWrapper.java |  10 +-
 .../fs/GridHadoopDistributedFileSystem.java     |   2 +-
 .../grid/ggfs/GridGgfsEventsTestSuite.java      |  40 +-
 ...dGgfsHadoop20FileSystemAbstractSelfTest.java |   8 +-
 .../GridGgfsHadoopDualAbstractSelfTest.java     |   2 +-
 ...ridGgfsHadoopFileSystemAbstractSelfTest.java |   8 +-
 .../GridGgfsHadoopFileSystemClientSelfTest.java |   2 +-
 ...idGgfsHadoopFileSystemHandshakeSelfTest.java |   2 +-
 ...ridGgfsHadoopFileSystemIpcCacheSelfTest.java |   4 +-
 ...GgfsHadoopFileSystemLoggerStateSelfTest.java |   2 +-
 ...fsHadoopFileSystemSecondaryModeSelfTest.java |   4 +-
 .../ggfs/GridGgfsNearOnlyMultiNodeSelfTest.java |   2 +-
 .../hadoop/GridHadoopAbstractSelfTest.java      |   4 +-
 ...idHadoopDefaultMapReducePlannerSelfTest.java |   2 +-
 .../hadoop/GridHadoopTaskExecutionSelfTest.java |   4 +-
 76 files changed, 1078 insertions(+), 1081 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/cd01ed99/examples/config/filesystem/example-ggfs.xml
----------------------------------------------------------------------
diff --git a/examples/config/filesystem/example-ggfs.xml b/examples/config/filesystem/example-ggfs.xml
index c038506..9c6ec1c 100644
--- a/examples/config/filesystem/example-ggfs.xml
+++ b/examples/config/filesystem/example-ggfs.xml
@@ -62,7 +62,7 @@
 
         <property name="ggfsConfiguration">
             <list>
-                <bean class="org.gridgain.grid.ggfs.GridGgfsConfiguration">
+                <bean class="org.gridgain.grid.ggfs.IgniteFsConfiguration">
                     <property name="name" value="ggfs"/>
                     <property name="metaCacheName" value="ggfs-meta"/>
                     <property name="dataCacheName" value="ggfs-data"/>

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/cd01ed99/examples/src/main/java/org/gridgain/examples/ggfs/GgfsExample.java
----------------------------------------------------------------------
diff --git a/examples/src/main/java/org/gridgain/examples/ggfs/GgfsExample.java b/examples/src/main/java/org/gridgain/examples/ggfs/GgfsExample.java
index 307d582..8049b75 100644
--- a/examples/src/main/java/org/gridgain/examples/ggfs/GgfsExample.java
+++ b/examples/src/main/java/org/gridgain/examples/ggfs/GgfsExample.java
@@ -113,7 +113,7 @@ public final class GgfsExample {
                 System.out.println();
                 System.out.println(">>> Deleted " + (isFile ? "file" : "directory") + ": " + path);
             }
-            catch (GridGgfsException e) {
+            catch (IgniteFsException e) {
                 System.out.println();
                 System.out.println(">>> Failed to delete " + (isFile ? "file" : "directory") + " [path=" + path +
                     ", msg=" + e.getMessage() + ']');
@@ -142,7 +142,7 @@ public final class GgfsExample {
             System.out.println();
             System.out.println(">>> Created directory: " + path);
         }
-        catch (GridGgfsException e) {
+        catch (IgniteFsException e) {
             System.out.println();
             System.out.println(">>> Failed to create a directory [path=" + path + ", msg=" + e.getMessage() + ']');
         }

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/cd01ed99/modules/core/src/main/java/org/apache/ignite/Ignite.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/Ignite.java b/modules/core/src/main/java/org/apache/ignite/Ignite.java
index 2cc3f9c..5c387e2 100644
--- a/modules/core/src/main/java/org/apache/ignite/Ignite.java
+++ b/modules/core/src/main/java/org/apache/ignite/Ignite.java
@@ -16,7 +16,6 @@ import org.apache.ignite.product.*;
 import org.gridgain.grid.*;
 import org.gridgain.grid.cache.*;
 import org.gridgain.grid.dr.*;
-import org.gridgain.grid.ggfs.*;
 import org.gridgain.grid.hadoop.*;
 import org.gridgain.grid.security.*;
 import org.gridgain.grid.util.typedef.*;
@@ -215,9 +214,9 @@ public interface Ignite extends AutoCloseable {
      * @param <V> Value type.
      * @param name Cache name.
      * @return Cache instance for given name.
-     * @see GridGgfsConfiguration
-     * @see GridGgfsConfiguration#getDataCacheName()
-     * @see GridGgfsConfiguration#getMetaCacheName()
+     * @see org.gridgain.grid.ggfs.IgniteFsConfiguration
+     * @see org.gridgain.grid.ggfs.IgniteFsConfiguration#getDataCacheName()
+     * @see org.gridgain.grid.ggfs.IgniteFsConfiguration#getMetaCacheName()
      */
     public <K, V> GridCache<K, V> cache(@Nullable String name);
 
@@ -225,9 +224,9 @@ public interface Ignite extends AutoCloseable {
      * Gets all configured caches.
      * Caches that are used as GGFS meta and data caches will not be returned in resulting collection.
      *
-     * @see GridGgfsConfiguration
-     * @see GridGgfsConfiguration#getDataCacheName()
-     * @see GridGgfsConfiguration#getMetaCacheName()
+     * @see org.gridgain.grid.ggfs.IgniteFsConfiguration
+     * @see org.gridgain.grid.ggfs.IgniteFsConfiguration#getDataCacheName()
+     * @see org.gridgain.grid.ggfs.IgniteFsConfiguration#getMetaCacheName()
      * @return All configured caches.
      */
     public Collection<GridCache<?, ?>> caches();

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/cd01ed99/modules/core/src/main/java/org/apache/ignite/IgniteFs.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/IgniteFs.java b/modules/core/src/main/java/org/apache/ignite/IgniteFs.java
index d50afd2..21b28fb 100644
--- a/modules/core/src/main/java/org/apache/ignite/IgniteFs.java
+++ b/modules/core/src/main/java/org/apache/ignite/IgniteFs.java
@@ -67,7 +67,7 @@ public interface IgniteFs extends GridGgfsFileSystem, IgniteAsyncSupport {
      *
      * @return GGFS configuration.
      */
-    public GridGgfsConfiguration configuration();
+    public IgniteFsConfiguration configuration();
 
     /**
      * Gets summary (total number of files, total number of directories and total length)
@@ -274,7 +274,7 @@ public interface IgniteFs extends GridGgfsFileSystem, IgniteAsyncSupport {
 
     /**
      * Executes GGFS task with overridden maximum range length (see
-     * {@link GridGgfsConfiguration#getMaximumTaskRangeLength()} for more information).
+     * {@link org.gridgain.grid.ggfs.IgniteFsConfiguration#getMaximumTaskRangeLength()} for more information).
      * <p>
      * Supports asynchronous execution (see {@link IgniteAsyncSupport}).
      *
@@ -310,7 +310,7 @@ public interface IgniteFs extends GridGgfsFileSystem, IgniteAsyncSupport {
 
     /**
      * Executes GGFS task with overridden maximum range length (see
-     * {@link GridGgfsConfiguration#getMaximumTaskRangeLength()} for more information).
+     * {@link org.gridgain.grid.ggfs.IgniteFsConfiguration#getMaximumTaskRangeLength()} for more information).
      * <p>
      * Supports asynchronous execution (see {@link IgniteAsyncSupport}).
      *

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/cd01ed99/modules/core/src/main/java/org/apache/ignite/configuration/IgniteConfiguration.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/configuration/IgniteConfiguration.java b/modules/core/src/main/java/org/apache/ignite/configuration/IgniteConfiguration.java
index 72d3fda..4bfb26c 100644
--- a/modules/core/src/main/java/org/apache/ignite/configuration/IgniteConfiguration.java
+++ b/modules/core/src/main/java/org/apache/ignite/configuration/IgniteConfiguration.java
@@ -505,7 +505,7 @@ public class IgniteConfiguration {
     private String[] restAccessibleFolders;
 
     /** GGFS configuration. */
-    private GridGgfsConfiguration[] ggfsCfg;
+    private IgniteFsConfiguration[] ggfsCfg;
 
     /** Client message interceptor. */
     private GridClientMessageInterceptor clientMsgInterceptor;
@@ -2983,7 +2983,7 @@ public class IgniteConfiguration {
      *
      * @return GGFS configurations.
      */
-    public GridGgfsConfiguration[] getGgfsConfiguration() {
+    public IgniteFsConfiguration[] getGgfsConfiguration() {
         return ggfsCfg;
     }
 
@@ -2992,7 +2992,7 @@ public class IgniteConfiguration {
      *
      * @param ggfsCfg GGFS configurations.
      */
-    public void setGgfsConfiguration(GridGgfsConfiguration... ggfsCfg) {
+    public void setGgfsConfiguration(IgniteFsConfiguration... ggfsCfg) {
         this.ggfsCfg = ggfsCfg;
     }
 

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/cd01ed99/modules/core/src/main/java/org/gridgain/grid/ggfs/GridGgfsConcurrentModificationException.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/gridgain/grid/ggfs/GridGgfsConcurrentModificationException.java b/modules/core/src/main/java/org/gridgain/grid/ggfs/GridGgfsConcurrentModificationException.java
deleted file mode 100644
index fa7c38d..0000000
--- a/modules/core/src/main/java/org/gridgain/grid/ggfs/GridGgfsConcurrentModificationException.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/* @java.file.header */
-
-/*  _________        _____ __________________        _____
- *  __  ____/___________(_)______  /__  ____/______ ____(_)_______
- *  _  / __  __  ___/__  / _  __  / _  / __  _  __ `/__  / __  __ \
- *  / /_/ /  _  /    _  /  / /_/ /  / /_/ /  / /_/ / _  /  _  / / /
- *  \____/   /_/     /_/   \_,__/   \____/   \__,_/  /_/   /_/ /_/
- */
-
-package org.gridgain.grid.ggfs;
-
-/**
- * {@code GGFS} exception indicating that file system structure was modified concurrently. This error
- * indicates that an operation performed in DUAL mode cannot proceed due to these changes.
- */
-public class GridGgfsConcurrentModificationException extends GridGgfsException {
-    /** */
-    private static final long serialVersionUID = 0L;
-
-    /**
-     * Creates new exception.
-     *
-     * @param path Affected path.
-     */
-    public GridGgfsConcurrentModificationException(IgniteFsPath path) {
-        super("File system entry has been modified concurrently: " + path, null);
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/cd01ed99/modules/core/src/main/java/org/gridgain/grid/ggfs/GridGgfsConfiguration.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/gridgain/grid/ggfs/GridGgfsConfiguration.java b/modules/core/src/main/java/org/gridgain/grid/ggfs/GridGgfsConfiguration.java
deleted file mode 100644
index dd97e28..0000000
--- a/modules/core/src/main/java/org/gridgain/grid/ggfs/GridGgfsConfiguration.java
+++ /dev/null
@@ -1,801 +0,0 @@
-/* @java.file.header */
-
-/*  _________        _____ __________________        _____
- *  __  ____/___________(_)______  /__  ____/______ ____(_)_______
- *  _  / __  __  ___/__  / _  __  / _  / __  _  __ `/__  / __  __ \
- *  / /_/ /  _  /    _  /  / /_/ /  / /_/ /  / /_/ / _  /  _  / / /
- *  \____/   /_/     /_/   \_,__/   \____/   \__,_/  /_/   /_/ /_/
- */
-
-package org.gridgain.grid.ggfs;
-
-import org.gridgain.grid.ggfs.mapreduce.*;
-import org.gridgain.grid.util.typedef.internal.*;
-import org.jetbrains.annotations.*;
-
-import java.util.*;
-import java.util.concurrent.*;
-
-import static org.gridgain.grid.ggfs.GridGgfsMode.*;
-
-/**
- * {@code GGFS} configuration. More than one file system can be configured within grid.
- * {@code GGFS} configuration is provided via {@link org.apache.ignite.configuration.IgniteConfiguration#getGgfsConfiguration()}
- * method.
- * <p>
- * Refer to {@code config/hadoop/default-config.xml} or {@code config/hadoop/default-config-client.xml}
- * configuration files under GridGain installation to see sample {@code GGFS} configuration.
- */
-public class GridGgfsConfiguration {
-    /** Default file system user name. */
-    public static final String DFLT_USER_NAME = System.getProperty("user.name", "anonymous");
-
-    /** Default IPC port. */
-    public static final int DFLT_IPC_PORT = 10500;
-
-    /** Default fragmentizer throttling block length. */
-    public static final long DFLT_FRAGMENTIZER_THROTTLING_BLOCK_LENGTH = 16 * 1024 * 1024;
-
-    /** Default fragmentizer throttling delay. */
-    public static final long DFLT_FRAGMENTIZER_THROTTLING_DELAY = 200;
-
-    /** Default fragmentizer concurrent files. */
-    public static final int DFLT_FRAGMENTIZER_CONCURRENT_FILES = 0;
-
-    /** Default fragmentizer local writes ratio. */
-    public static final float DFLT_FRAGMENTIZER_LOCAL_WRITES_RATIO = 0.8f;
-
-    /** Fragmentizer enabled property. */
-    public static final boolean DFLT_FRAGMENTIZER_ENABLED = true;
-
-    /** Default batch size for logging. */
-    public static final int DFLT_GGFS_LOG_BATCH_SIZE = 100;
-
-    /** Default {@code GGFS} log directory. */
-    public static final String DFLT_GGFS_LOG_DIR = "work/ggfs/log";
-
-    /** Default per node buffer size. */
-    public static final int DFLT_PER_NODE_BATCH_SIZE = 100;
-
-    /** Default number of per node parallel operations. */
-    public static final int DFLT_PER_NODE_PARALLEL_BATCH_CNT = 8;
-
-    /** Default GGFS mode. */
-    public static final GridGgfsMode DFLT_MODE = DUAL_ASYNC;
-
-    /** Default file's data block size (bytes). */
-    public static final int DFLT_BLOCK_SIZE = 1 << 16;
-
-    /** Default read/write buffers size (bytes). */
-    public static final int DFLT_BUF_SIZE = 1 << 16;
-
-    /** Default trash directory purge await timeout in case data cache oversize is detected. */
-    public static final long DFLT_TRASH_PURGE_TIMEOUT = 1000;
-
-    /** Default management port. */
-    public static final int DFLT_MGMT_PORT = 11400;
-
-    /** Default IPC endpoint enabled flag. */
-    public static final boolean DFLT_IPC_ENDPOINT_ENABLED = true;
-
-    /** GGFS instance name. */
-    private String name;
-
-    /** Cache name to store GGFS meta information. */
-    private String metaCacheName;
-
-    /** Cache name to store file's data blocks. */
-    private String dataCacheName;
-
-    /** File's data block size (bytes). */
-    private int blockSize = DFLT_BLOCK_SIZE;
-
-    /** The number of pre-fetched blocks if specific file's chunk is requested. */
-    private int prefetchBlocks;
-
-    /** Amount of sequential block reads before prefetch is triggered. */
-    private int seqReadsBeforePrefetch;
-
-    /** Read/write buffers size for stream operations (bytes). */
-    private int bufSize = DFLT_BUF_SIZE;
-
-    /** Per node buffer size. */
-    private int perNodeBatchSize = DFLT_PER_NODE_BATCH_SIZE;
-
-    /** Per node parallel operations. */
-    private int perNodeParallelBatchCnt = DFLT_PER_NODE_PARALLEL_BATCH_CNT;
-
-    /** IPC endpoint properties to publish GGFS over. */
-    private Map<String, String> ipcEndpointCfg;
-
-    /** IPC endpoint enabled flag. */
-    private boolean ipcEndpointEnabled = DFLT_IPC_ENDPOINT_ENABLED;
-
-    /** Management port. */
-    private int mgmtPort = DFLT_MGMT_PORT;
-
-    /** Secondary file system */
-    private GridGgfsFileSystem secondaryFs;
-
-    /** GGFS mode. */
-    private GridGgfsMode dfltMode = DFLT_MODE;
-
-    /** Fragmentizer throttling block length. */
-    private long fragmentizerThrottlingBlockLen = DFLT_FRAGMENTIZER_THROTTLING_BLOCK_LENGTH;
-
-    /** Fragmentizer throttling delay. */
-    private long fragmentizerThrottlingDelay = DFLT_FRAGMENTIZER_THROTTLING_DELAY;
-
-    /** Fragmentizer concurrent files. */
-    private int fragmentizerConcurrentFiles = DFLT_FRAGMENTIZER_CONCURRENT_FILES;
-
-    /** Fragmentizer local writes ratio. */
-    private float fragmentizerLocWritesRatio = DFLT_FRAGMENTIZER_LOCAL_WRITES_RATIO;
-
-    /** Fragmentizer enabled flag. */
-    private boolean fragmentizerEnabled = DFLT_FRAGMENTIZER_ENABLED;
-
-    /** Path modes. */
-    private Map<String, GridGgfsMode> pathModes;
-
-    /** Maximum space. */
-    private long maxSpace;
-
-    /** Trash purge await timeout. */
-    private long trashPurgeTimeout = DFLT_TRASH_PURGE_TIMEOUT;
-
-    /** Dual mode PUT operations executor service. */
-    private ExecutorService dualModePutExec;
-
-    /** Dual mode PUT operations executor service shutdown flag. */
-    private boolean dualModePutExecShutdown;
-
-    /** Maximum amount of data in pending puts. */
-    private long dualModeMaxPendingPutsSize;
-
-    /** Maximum range length. */
-    private long maxTaskRangeLen;
-
-    /**
-     * Constructs default configuration.
-     */
-    public GridGgfsConfiguration() {
-        // No-op.
-    }
-
-    /**
-     * Constructs the copy of the configuration.
-     *
-     * @param cfg Configuration to copy.
-     */
-    public GridGgfsConfiguration(GridGgfsConfiguration cfg) {
-        assert cfg != null;
-
-        /*
-         * Must preserve alphabetical order!
-         */
-        blockSize = cfg.getBlockSize();
-        bufSize = cfg.getStreamBufferSize();
-        dataCacheName = cfg.getDataCacheName();
-        dfltMode = cfg.getDefaultMode();
-        dualModeMaxPendingPutsSize = cfg.getDualModeMaxPendingPutsSize();
-        dualModePutExec = cfg.getDualModePutExecutorService();
-        dualModePutExecShutdown = cfg.getDualModePutExecutorServiceShutdown();
-        fragmentizerConcurrentFiles = cfg.getFragmentizerConcurrentFiles();
-        fragmentizerLocWritesRatio = cfg.getFragmentizerLocalWritesRatio();
-        fragmentizerEnabled = cfg.isFragmentizerEnabled();
-        fragmentizerThrottlingBlockLen = cfg.getFragmentizerThrottlingBlockLength();
-        fragmentizerThrottlingDelay = cfg.getFragmentizerThrottlingDelay();
-        secondaryFs = cfg.getSecondaryFileSystem();
-        ipcEndpointCfg = cfg.getIpcEndpointConfiguration();
-        ipcEndpointEnabled = cfg.isIpcEndpointEnabled();
-        maxSpace = cfg.getMaxSpaceSize();
-        maxTaskRangeLen = cfg.getMaximumTaskRangeLength();
-        metaCacheName = cfg.getMetaCacheName();
-        mgmtPort = cfg.getManagementPort();
-        name = cfg.getName();
-        pathModes = cfg.getPathModes();
-        perNodeBatchSize = cfg.getPerNodeBatchSize();
-        perNodeParallelBatchCnt = cfg.getPerNodeParallelBatchCount();
-        prefetchBlocks = cfg.getPrefetchBlocks();
-        seqReadsBeforePrefetch = cfg.getSequentialReadsBeforePrefetch();
-        trashPurgeTimeout = cfg.getTrashPurgeTimeout();
-    }
-
-    /**
-     * Gets GGFS instance name. If {@code null}, then instance with default
-     * name will be used.
-     *
-     * @return GGFS instance name.
-     */
-    @Nullable public String getName() {
-        return name;
-    }
-
-    /**
-     * Sets GGFS instance name.
-     *
-     * @param name GGFS instance name.
-     */
-    public void setName(String name) {
-        this.name = name;
-    }
-
-    /**
-     * Cache name to store GGFS meta information. If {@code null}, then instance
-     * with default meta-cache name will be used.
-     *
-     * @return Cache name to store GGFS meta information.
-     */
-    @Nullable public String getMetaCacheName() {
-        return metaCacheName;
-    }
-
-    /**
-     * Sets cache name to store GGFS meta information.
-     *
-     * @param metaCacheName Cache name to store GGFS meta information.
-     */
-    public void setMetaCacheName(String metaCacheName) {
-        this.metaCacheName = metaCacheName;
-    }
-
-    /**
-     * Cache name to store GGFS data.
-     *
-     * @return Cache name to store GGFS data.
-     */
-    @Nullable public String getDataCacheName() {
-        return dataCacheName;
-    }
-
-    /**
-     * Sets cache name to store GGFS data.
-     *
-     * @param dataCacheName Cache name to store GGFS data.
-     */
-    public void setDataCacheName(String dataCacheName) {
-        this.dataCacheName = dataCacheName;
-    }
-
-    /**
-     * Get file's data block size.
-     *
-     * @return File's data block size.
-     */
-    public int getBlockSize() {
-        return blockSize;
-    }
-
-    /**
-     * Sets file's data block size.
-     *
-     * @param blockSize File's data block size (bytes) or {@code 0} to reset default value.
-     */
-    public void setBlockSize(int blockSize) {
-        A.ensure(blockSize >= 0, "blockSize >= 0");
-
-        this.blockSize = blockSize == 0 ? DFLT_BLOCK_SIZE : blockSize;
-    }
-
-    /**
-     * Get number of pre-fetched blocks if specific file's chunk is requested.
-     *
-     * @return The number of pre-fetched blocks.
-     */
-    public int getPrefetchBlocks() {
-        return prefetchBlocks;
-    }
-
-    /**
-     * Sets the number of pre-fetched blocks if specific file's chunk is requested.
-     *
-     * @param prefetchBlocks New number of pre-fetched blocks.
-     */
-    public void setPrefetchBlocks(int prefetchBlocks) {
-        A.ensure(prefetchBlocks >= 0, "prefetchBlocks >= 0");
-
-        this.prefetchBlocks = prefetchBlocks;
-    }
-
-    /**
-     * Get amount of sequential block reads before prefetch is triggered. The
-     * higher this value, the longer GGFS will wait before starting to prefetch
-     * values ahead of time. Depending on the use case, this can either help
-     * or hurt performance.
-     * <p>
-     * Default is {@code 0} which means that pre-fetching will start right away.
-     * <h1 class="header">Integration With Hadoop</h1>
-     * This parameter can be also overridden for individual Hadoop MapReduce tasks by passing
-     * {@code org.gridgain.grid.ggfs.hadoop.GridGgfsHadoopParameters.PARAM_GGFS_SEQ_READS_BEFORE_PREFETCH}
-     * configuration property directly to Hadoop MapReduce task.
-     * <p>
-     * <b>NOTE:</b> Integration with Hadoop is available only in {@code In-Memory Accelerator For Hadoop} edition.
-     *
-     * @return Amount of sequential block reads.
-     */
-    public int getSequentialReadsBeforePrefetch() {
-        return seqReadsBeforePrefetch;
-    }
-
-    /**
-     * Sets amount of sequential block reads before prefetch is triggered. The
-     * higher this value, the longer GGFS will wait before starting to prefetch
-     * values ahead of time. Depending on the use case, this can either help
-     * or hurt performance.
-     * <p>
-     * Default is {@code 0} which means that pre-fetching will start right away.
-     * <h1 class="header">Integration With Hadoop</h1>
-     * This parameter can be also overridden for individual Hadoop MapReduce tasks by passing
-     * {@code org.gridgain.grid.ggfs.hadoop.GridGgfsHadoopParameters.PARAM_GGFS_SEQ_READS_BEFORE_PREFETCH}
-     * configuration property directly to Hadoop MapReduce task.
-     * <p>
-     * <b>NOTE:</b> Integration with Hadoop is available only in {@code In-Memory Accelerator For Hadoop} edition.
-     *
-     * @param seqReadsBeforePrefetch Amount of sequential block reads before prefetch is triggered.
-     */
-    public void setSequentialReadsBeforePrefetch(int seqReadsBeforePrefetch) {
-        A.ensure(seqReadsBeforePrefetch >= 0, "seqReadsBeforePrefetch >= 0");
-
-        this.seqReadsBeforePrefetch = seqReadsBeforePrefetch;
-    }
-
-    /**
-     * Get read/write buffer size for {@code GGFS} stream operations in bytes.
-     *
-     * @return Read/write buffers size (bytes).
-     */
-    public int getStreamBufferSize() {
-        return bufSize;
-    }
-
-    /**
-     * Sets read/write buffers size for {@code GGFS} stream operations (bytes).
-     *
-     * @param bufSize Read/write buffers size for stream operations (bytes) or {@code 0} to reset default value.
-     */
-    public void setStreamBufferSize(int bufSize) {
-        A.ensure(bufSize >= 0, "bufSize >= 0");
-
-        this.bufSize = bufSize == 0 ? DFLT_BUF_SIZE : bufSize;
-    }
-
-    /**
-     * Gets number of file blocks buffered on local node before sending batch to remote node.
-     *
-     * @return Per node buffer size.
-     */
-    public int getPerNodeBatchSize() {
-        return perNodeBatchSize;
-    }
-
-    /**
-     * Sets number of file blocks collected on local node before sending batch to remote node.
-     *
-     * @param perNodeBatchSize Per node buffer size.
-     */
-    public void setPerNodeBatchSize(int perNodeBatchSize) {
-        this.perNodeBatchSize = perNodeBatchSize;
-    }
-
-    /**
-     * Gets number of batches that can be concurrently sent to remote node.
-     *
-     * @return Number of batches for each node.
-     */
-    public int getPerNodeParallelBatchCount() {
-        return perNodeParallelBatchCnt;
-    }
-
-    /**
-     * Sets number of file block batches that can be concurrently sent to remote node.
-     *
-     * @param perNodeParallelBatchCnt Per node parallel load operations.
-     */
-    public void setPerNodeParallelBatchCount(int perNodeParallelBatchCnt) {
-        this.perNodeParallelBatchCnt = perNodeParallelBatchCnt;
-    }
-
-    /**
-     * Gets map of IPC endpoint configuration properties. There are 2 different
-     * types of endpoint supported: {@code shared-memory}, and {@code TCP}.
-     * <p>
-     * The following configuration properties are supported for {@code shared-memory}
-     * endpoint:
-     * <ul>
-     *     <li>{@code type} - value is {@code shmem} to specify {@code shared-memory} approach.</li>
-     *     <li>{@code port} - endpoint port.</li>
-     *     <li>{@code size} - memory size allocated for single endpoint communication.</li>
-     *     <li>
-     *         {@code tokenDirectoryPath} - path, either absolute or relative to {@code GRIDGAIN_HOME} to
-     *         store shared memory tokens.
-     *     </li>
-     * </ul>
-     * <p>
-     * The following configuration properties are supported for {@code TCP} approach:
-     * <ul>
-     *     <li>{@code type} - value is {@code tcp} to specify {@code TCP} approach.</li>
-     *     <li>{@code port} - endpoint bind port.</li>
-     *     <li>
-     *         {@code host} - endpoint bind host. If omitted '127.0.0.1' will be used.
-     *     </li>
-     * </ul>
-     * <p>
-     * Note that {@code shared-memory} approach is not supported on Windows environments.
-     * In case GGFS is failed to bind to particular port, further attempts will be performed every 3 seconds.
-     *
-     * @return Map of IPC endpoint configuration properties. In case the value is not set, defaults will be used. Default
-     * type for Windows is "tcp", for all other platforms - "shmem". Default port is {@link #DFLT_IPC_PORT}.
-     */
-    @Nullable public Map<String,String> getIpcEndpointConfiguration() {
-        return ipcEndpointCfg;
-    }
-
-    /**
-     * Sets IPC endpoint configuration to publish GGFS over.
-     *
-     * @param ipcEndpointCfg Map of IPC endpoint config properties.
-     */
-    public void setIpcEndpointConfiguration(@Nullable Map<String,String> ipcEndpointCfg) {
-        this.ipcEndpointCfg = ipcEndpointCfg;
-    }
-
-    /**
-     * Get IPC endpoint enabled flag. In case it is set to {@code true} endpoint will be created and bound to specific
-     * port. Otherwise endpoint will not be created. Default value is {@link #DFLT_IPC_ENDPOINT_ENABLED}.
-     *
-     * @return {@code True} in case endpoint is enabled.
-     */
-    public boolean isIpcEndpointEnabled() {
-        return ipcEndpointEnabled;
-    }
-
-    /**
-     * Set IPC endpoint enabled flag. See {@link #isIpcEndpointEnabled()}.
-     *
-     * @param ipcEndpointEnabled IPC endpoint enabled flag.
-     */
-    public void setIpcEndpointEnabled(boolean ipcEndpointEnabled) {
-        this.ipcEndpointEnabled = ipcEndpointEnabled;
-    }
-
-    /**
-     * Gets port number for management endpoint. All GGFS nodes should have this port open
-     * for Visor Management Console to work with GGFS.
-     * <p>
-     * Default value is {@link #DFLT_MGMT_PORT}
-     *
-     * @return Port number or {@code -1} if management endpoint should be disabled.
-     */
-    public int getManagementPort() {
-        return mgmtPort;
-    }
-
-    /**
-     * Sets management endpoint port.
-     *
-     * @param mgmtPort port number or {@code -1} to disable management endpoint.
-     */
-    public void setManagementPort(int mgmtPort) {
-        this.mgmtPort = mgmtPort;
-    }
-
-    /**
-     * Gets mode to specify how {@code GGFS} interacts with Hadoop file system, like {@code HDFS}.
-     * Secondary Hadoop file system is provided for pass-through, write-through, and read-through
-     * purposes.
-     * <p>
-     * Default mode is {@link GridGgfsMode#DUAL_ASYNC}. If secondary Hadoop file system is
-     * not configured, this mode will work just like {@link GridGgfsMode#PRIMARY} mode.
-     *
-     * @return Mode to specify how GGFS interacts with secondary HDFS file system.
-     */
-    public GridGgfsMode getDefaultMode() {
-        return dfltMode;
-    }
-
-    /**
-     * Sets {@code GGFS} mode to specify how it should interact with secondary
-     * Hadoop file system, like {@code HDFS}. Secondary Hadoop file system is provided
-     * for pass-through, write-through, and read-through purposes.
-     *
-     * @param dfltMode {@code GGFS} mode.
-     */
-    public void setDefaultMode(GridGgfsMode dfltMode) {
-        this.dfltMode = dfltMode;
-    }
-
-    /**
-     * Gets the secondary file system. Secondary file system is provided for pass-through, write-through,
-     * and read-through purposes.
-     *
-     * @return Secondary file system.
-     */
-    public GridGgfsFileSystem getSecondaryFileSystem() {
-        return secondaryFs;
-    }
-
-    /**
-     * Sets the secondary file system. Secondary file system is provided for pass-through, write-through,
-     * and read-through purposes.
-     *
-     * @param fileSystem
-     */
-    public void setSecondaryFileSystem(GridGgfsFileSystem fileSystem) {
-        secondaryFs = fileSystem;
-    }
-
-    /**
-     * Gets map of path prefixes to {@code GGFS} modes used for them.
-     * <p>
-     * If path doesn't correspond to any specified prefix or mappings are not provided, then
-     * {@link #getDefaultMode()} is used.
-     * <p>
-     * Several folders under {@code '/gridgain'} folder have predefined mappings which cannot be overridden.
-     * <li>{@code /gridgain/primary} and all it's sub-folders will always work in {@code PRIMARY} mode.</li>
-     * <p>
-     * And in case secondary file system URI is provided:
-     * <li>{@code /gridgain/proxy} and all it's sub-folders will always work in {@code PROXY} mode.</li>
-     * <li>{@code /gridgain/sync} and all it's sub-folders will always work in {@code DUAL_SYNC} mode.</li>
-     * <li>{@code /gridgain/async} and all it's sub-folders will always work in {@code DUAL_ASYNC} mode.</li>
-     *
-     * @return Map of paths to {@code GGFS} modes.
-     */
-    @Nullable public Map<String, GridGgfsMode> getPathModes() {
-        return pathModes;
-    }
-
-    /**
-     * Sets map of path prefixes to {@code GGFS} modes used for them.
-     * <p>
-     * If path doesn't correspond to any specified prefix or mappings are not provided, then
-     * {@link #getDefaultMode()} is used.
-     *
-     * @param pathModes Map of paths to {@code GGFS} modes.
-     */
-    public void setPathModes(Map<String, GridGgfsMode> pathModes) {
-        this.pathModes = pathModes;
-    }
-
-    /**
-     * Gets the length of file chunk to send before delaying the fragmentizer.
-     *
-     * @return File chunk length in bytes.
-     */
-    public long getFragmentizerThrottlingBlockLength() {
-        return fragmentizerThrottlingBlockLen;
-    }
-
-    /**
-     * Sets length of file chunk to transmit before throttling is delayed.
-     *
-     * @param fragmentizerThrottlingBlockLen Block length in bytes.
-     */
-    public void setFragmentizerThrottlingBlockLength(long fragmentizerThrottlingBlockLen) {
-        this.fragmentizerThrottlingBlockLen = fragmentizerThrottlingBlockLen;
-    }
-
-    /**
-     * Gets throttle delay for fragmentizer.
-     *
-     * @return Throttle delay in milliseconds.
-     */
-    public long getFragmentizerThrottlingDelay() {
-        return fragmentizerThrottlingDelay;
-    }
-
-    /**
-     * Sets delay in milliseconds for which fragmentizer is paused.
-     *
-     * @param fragmentizerThrottlingDelay Delay in milliseconds.
-     */
-    public void setFragmentizerThrottlingDelay(long fragmentizerThrottlingDelay) {
-        this.fragmentizerThrottlingDelay = fragmentizerThrottlingDelay;
-    }
-
-    /**
-     * Gets number of files that can be processed by fragmentizer concurrently.
-     *
-     * @return Number of files to process concurrently.
-     */
-    public int getFragmentizerConcurrentFiles() {
-        return fragmentizerConcurrentFiles;
-    }
-
-    /**
-     * Sets number of files to process concurrently by fragmentizer.
-     *
-     * @param fragmentizerConcurrentFiles Number of files to process concurrently.
-     */
-    public void setFragmentizerConcurrentFiles(int fragmentizerConcurrentFiles) {
-        this.fragmentizerConcurrentFiles = fragmentizerConcurrentFiles;
-    }
-
-    /**
-     * Gets amount of local memory (in % of local GGFS max space size) available for local writes
-     * during file creation.
-     * <p>
-     * If current GGFS space size is less than {@code fragmentizerLocalWritesRatio * maxSpaceSize},
-     * then file blocks will be written to the local node first and then asynchronously distributed
-     * among cluster nodes (fragmentized).
-     * <p>
-     * Default value is {@link #DFLT_FRAGMENTIZER_LOCAL_WRITES_RATIO}.
-     *
-     * @return Ratio for local writes space.
-     */
-    public float getFragmentizerLocalWritesRatio() {
-        return fragmentizerLocWritesRatio;
-    }
-
-    /**
-     * Sets ratio for space available for local file writes.
-     *
-     * @param fragmentizerLocWritesRatio Ratio for local file writes.
-     * @see #getFragmentizerLocalWritesRatio()
-     */
-    public void setFragmentizerLocalWritesRatio(float fragmentizerLocWritesRatio) {
-        this.fragmentizerLocWritesRatio = fragmentizerLocWritesRatio;
-    }
-
-    /**
-     * Gets flag indicating whether GGFS fragmentizer is enabled. If fragmentizer is disabled, files will be
-     * written in distributed fashion.
-     *
-     * @return Flag indicating whether fragmentizer is enabled.
-     */
-    public boolean isFragmentizerEnabled() {
-        return fragmentizerEnabled;
-    }
-
-    /**
-     * Sets property indicating whether fragmentizer is enabled.
-     *
-     * @param fragmentizerEnabled {@code True} if fragmentizer is enabled.
-     */
-    public void setFragmentizerEnabled(boolean fragmentizerEnabled) {
-        this.fragmentizerEnabled = fragmentizerEnabled;
-    }
-
-    /**
-     * Get maximum space available for data cache to store file system entries.
-     *
-     * @return Maximum space available for data cache.
-     */
-    public long getMaxSpaceSize() {
-        return maxSpace;
-    }
-
-    /**
-     * Set maximum space in bytes available in data cache.
-     *
-     * @param maxSpace Maximum space available in data cache.
-     */
-    public void setMaxSpaceSize(long maxSpace) {
-        this.maxSpace = maxSpace;
-    }
-
-    /**
-     * Gets maximum timeout awaiting for trash purging in case data cache oversize is detected.
-     *
-     * @return Maximum timeout awaiting for trash purging in case data cache oversize is detected.
-     */
-    public long getTrashPurgeTimeout() {
-        return trashPurgeTimeout;
-    }
-
-    /**
-     * Sets maximum timeout awaiting for trash purging in case data cache oversize is detected.
-     *
-     * @param trashPurgeTimeout Maximum timeout awaiting for trash purging in case data cache oversize is detected.
-     */
-    public void setTrashPurgeTimeout(long trashPurgeTimeout) {
-        this.trashPurgeTimeout = trashPurgeTimeout;
-    }
-
-    /**
-     * Get DUAL mode put operation executor service. This executor service will process cache PUT requests for
-     * data which came from the secondary file system and about to be written to GGFS data cache.
-     * In case no executor service is provided, default one will be created with maximum amount of threads equals
-     * to amount of processor cores.
-     *
-     * @return Get DUAL mode put operation executor service
-     */
-    @Nullable public ExecutorService getDualModePutExecutorService() {
-        return dualModePutExec;
-    }
-
-    /**
-     * Set DUAL mode put operations executor service.
-     *
-     * @param dualModePutExec Dual mode put operations executor service.
-     */
-    public void setDualModePutExecutorService(ExecutorService dualModePutExec) {
-        this.dualModePutExec = dualModePutExec;
-    }
-
-    /**
-     * Get DUAL mode put operation executor service shutdown flag.
-     *
-     * @return DUAL mode put operation executor service shutdown flag.
-     */
-    public boolean getDualModePutExecutorServiceShutdown() {
-        return dualModePutExecShutdown;
-    }
-
-    /**
-     * Set DUAL mode put operations executor service shutdown flag.
-     *
-     * @param dualModePutExecShutdown Dual mode put operations executor service shutdown flag.
-     */
-    public void setDualModePutExecutorServiceShutdown(boolean dualModePutExecShutdown) {
-        this.dualModePutExecShutdown = dualModePutExecShutdown;
-    }
-
-    /**
-     * Get maximum amount of pending data read from the secondary file system and waiting to be written to data
-     * cache. {@code 0} or negative value stands for unlimited size.
-     * <p>
-     * By default this value is set to {@code 0}. It is recommended to set positive value in case your
-     * application performs frequent reads of large amount of data from the secondary file system in order to
-     * avoid issues with increasing GC pauses or out-of-memory error.
-     *
-     * @return Maximum amount of pending data read from the secondary file system
-     */
-    public long getDualModeMaxPendingPutsSize() {
-        return dualModeMaxPendingPutsSize;
-    }
-
-    /**
-     * Set maximum amount of data in pending put operations.
-     *
-     * @param dualModeMaxPendingPutsSize Maximum amount of data in pending put operations.
-     */
-    public void setDualModeMaxPendingPutsSize(long dualModeMaxPendingPutsSize) {
-        this.dualModeMaxPendingPutsSize = dualModeMaxPendingPutsSize;
-    }
-
-    /**
-     * Get maximum default range size of a file being split during GGFS task execution. When GGFS task is about to
-     * be executed, it requests file block locations first. Each location is defined as {@link GridGgfsFileRange} which
-     * has length. In case this parameter is set to positive value, then GGFS will split single file range into smaller
-     * ranges with length not greater that this parameter. The only exception to this case is when maximum task range
-     * length is smaller than file block size. In this case maximum task range size will be overridden and set to file
-     * block size.
-     * <p>
-     * Note that this parameter is applied when task is split into jobs before {@link GridGgfsRecordResolver} is
-     * applied. Therefore, final file ranges being assigned to particular jobs could be greater than value of this
-     * parameter depending on file data layout and selected resolver type.
-     * <p>
-     * Setting this parameter might be useful when file is highly colocated and have very long consequent data chunks
-     * so that task execution suffers from insufficient parallelism. E.g., in case you have one GGFS node in topology
-     * and want to process 1Gb file, then only single range of length 1Gb will be returned. This will result in
-     * a single job which will be processed in one thread. But in case you provide this configuration parameter and set
-     * maximum range length to 16Mb, then 64 ranges will be returned resulting in 64 jobs which could be executed in
-     * parallel.
-     * <p>
-     * Note that some {@code GridGgfs.execute()} methods can override value of this parameter.
-     * <p>
-     * In case value of this parameter is set to {@code 0} or negative value, it is simply ignored. Default value is
-     * {@code 0}.
-     *
-     * @return Maximum range size of a file being split during GGFS task execution.
-     */
-    public long getMaximumTaskRangeLength() {
-        return maxTaskRangeLen;
-    }
-
-    /**
-     * Set maximum default range size of a file being split during GGFS task execution.
-     * See {@link #getMaximumTaskRangeLength()} for more details.
-     *
-     * @param maxTaskRangeLen Set maximum default range size of a file being split during GGFS task execution.
-     */
-    public void setMaximumTaskRangeLength(long maxTaskRangeLen) {
-        this.maxTaskRangeLen = maxTaskRangeLen;
-    }
-
-    /** {@inheritDoc} */
-    @Override public String toString() {
-        return S.toString(GridGgfsConfiguration.class, this);
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/cd01ed99/modules/core/src/main/java/org/gridgain/grid/ggfs/GridGgfsCorruptedFileException.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/gridgain/grid/ggfs/GridGgfsCorruptedFileException.java b/modules/core/src/main/java/org/gridgain/grid/ggfs/GridGgfsCorruptedFileException.java
index 63209a2..4159042 100644
--- a/modules/core/src/main/java/org/gridgain/grid/ggfs/GridGgfsCorruptedFileException.java
+++ b/modules/core/src/main/java/org/gridgain/grid/ggfs/GridGgfsCorruptedFileException.java
@@ -14,7 +14,7 @@ import org.jetbrains.annotations.*;
 /**
  * Exception thrown when target file's block is not found in data cache.
  */
-public class GridGgfsCorruptedFileException extends GridGgfsException {
+public class GridGgfsCorruptedFileException extends IgniteFsException {
     /** */
     private static final long serialVersionUID = 0L;
 

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/cd01ed99/modules/core/src/main/java/org/gridgain/grid/ggfs/GridGgfsException.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/gridgain/grid/ggfs/GridGgfsException.java b/modules/core/src/main/java/org/gridgain/grid/ggfs/GridGgfsException.java
deleted file mode 100644
index dd315ab..0000000
--- a/modules/core/src/main/java/org/gridgain/grid/ggfs/GridGgfsException.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/* @java.file.header */
-
-/*  _________        _____ __________________        _____
- *  __  ____/___________(_)______  /__  ____/______ ____(_)_______
- *  _  / __  __  ___/__  / _  __  / _  / __  _  __ `/__  / __  __ \
- *  / /_/ /  _  /    _  /  / /_/ /  / /_/ /  / /_/ / _  /  _  / / /
- *  \____/   /_/     /_/   \_,__/   \____/   \__,_/  /_/   /_/ /_/
- */
-
-package org.gridgain.grid.ggfs;
-
-import org.gridgain.grid.*;
-import org.jetbrains.annotations.*;
-
-/**
- * {@code GGFS} exception thrown by file system components.
- */
-public class GridGgfsException extends GridException {
-    /** */
-    private static final long serialVersionUID = 0L;
-
-    /**
-     * Creates an instance of GGFS exception with descriptive error message.
-     *
-     * @param msg Error message.
-     */
-    public GridGgfsException(String msg) {
-        super(msg);
-    }
-
-    /**
-     * Creates an instance of GGFS exception caused by nested exception.
-     *
-     * @param cause Exception cause.
-     */
-    public GridGgfsException(Throwable cause) {
-        super(cause);
-    }
-
-    /**
-     * Creates an instance of GGFS exception with error message and underlying cause.
-     *
-     * @param msg Error message.
-     * @param cause Exception cause.
-     */
-    public GridGgfsException(String msg, @Nullable Throwable cause) {
-        super(msg, cause);
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/cd01ed99/modules/core/src/main/java/org/gridgain/grid/ggfs/GridGgfsInvalidHdfsVersionException.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/gridgain/grid/ggfs/GridGgfsInvalidHdfsVersionException.java b/modules/core/src/main/java/org/gridgain/grid/ggfs/GridGgfsInvalidHdfsVersionException.java
index b0e0b3a..1bb28c2 100644
--- a/modules/core/src/main/java/org/gridgain/grid/ggfs/GridGgfsInvalidHdfsVersionException.java
+++ b/modules/core/src/main/java/org/gridgain/grid/ggfs/GridGgfsInvalidHdfsVersionException.java
@@ -13,7 +13,7 @@ package org.gridgain.grid.ggfs;
  * Exception thrown when GridGain detects that remote HDFS version differs from version of HDFS libraries
  * in GridGain classpath.
  */
-public class GridGgfsInvalidHdfsVersionException extends GridGgfsException {
+public class GridGgfsInvalidHdfsVersionException extends IgniteFsException {
     /** */
     private static final long serialVersionUID = 0L;
 

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/cd01ed99/modules/core/src/main/java/org/gridgain/grid/ggfs/GridGgfsInvalidPathException.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/gridgain/grid/ggfs/GridGgfsInvalidPathException.java b/modules/core/src/main/java/org/gridgain/grid/ggfs/GridGgfsInvalidPathException.java
index 9125d53..76627a8 100644
--- a/modules/core/src/main/java/org/gridgain/grid/ggfs/GridGgfsInvalidPathException.java
+++ b/modules/core/src/main/java/org/gridgain/grid/ggfs/GridGgfsInvalidPathException.java
@@ -15,7 +15,7 @@ import org.jetbrains.annotations.*;
  * {@code GGFS} exception indicating that operation target is invalid
  * (e.g. not a file while expecting to be a file).
  */
-public class GridGgfsInvalidPathException extends GridGgfsException {
+public class GridGgfsInvalidPathException extends IgniteFsException {
     /** */
     private static final long serialVersionUID = 0L;
 

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/cd01ed99/modules/core/src/main/java/org/gridgain/grid/ggfs/GridGgfsMetrics.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/gridgain/grid/ggfs/GridGgfsMetrics.java b/modules/core/src/main/java/org/gridgain/grid/ggfs/GridGgfsMetrics.java
index a50aea7..31fffea 100644
--- a/modules/core/src/main/java/org/gridgain/grid/ggfs/GridGgfsMetrics.java
+++ b/modules/core/src/main/java/org/gridgain/grid/ggfs/GridGgfsMetrics.java
@@ -25,7 +25,7 @@ public interface GridGgfsMetrics {
 
     /**
      * Gets maximum amount of data that can be stored on local node. This metrics is either
-     * equal to {@link GridGgfsConfiguration#getMaxSpaceSize()}, or, if it is {@code 0}, equal to
+     * equal to {@link IgniteFsConfiguration#getMaxSpaceSize()}, or, if it is {@code 0}, equal to
      * {@code 80%} of maximum heap size allocated for JVM.
      *
      * @return Maximum GGFS local space size.

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/cd01ed99/modules/core/src/main/java/org/gridgain/grid/ggfs/GridGgfsMode.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/gridgain/grid/ggfs/GridGgfsMode.java b/modules/core/src/main/java/org/gridgain/grid/ggfs/GridGgfsMode.java
index 7dc44e1..8bca1e5 100644
--- a/modules/core/src/main/java/org/gridgain/grid/ggfs/GridGgfsMode.java
+++ b/modules/core/src/main/java/org/gridgain/grid/ggfs/GridGgfsMode.java
@@ -16,7 +16,7 @@ import org.jetbrains.annotations.*;
  * Secondary Hadoop file system is provided for pass-through, write-through, and
  * read-through purposes.
  * <p>
- * This mode is configured via {@link GridGgfsConfiguration#getDefaultMode()}
+ * This mode is configured via {@link IgniteFsConfiguration#getDefaultMode()}
  * configuration property.
  */
 public enum GridGgfsMode {
@@ -31,7 +31,7 @@ public enum GridGgfsMode {
      * through to secondary Hadoop file system. If this mode is enabled, then
      * secondary Hadoop file system must be configured.
      *
-     * @see GridGgfsConfiguration#getSecondaryHadoopFileSystemUri()
+     * @see IgniteFsConfiguration#getSecondaryHadoopFileSystemUri()
      */
     PROXY,
 
@@ -42,7 +42,7 @@ public enum GridGgfsMode {
      * If secondary Hadoop file system is not configured, then this mode behaves like
      * {@link #PRIMARY} mode.
      *
-     * @see GridGgfsConfiguration#getSecondaryHadoopFileSystemUri()
+     * @see IgniteFsConfiguration#getSecondaryHadoopFileSystemUri()
      */
     DUAL_SYNC,
 
@@ -53,7 +53,7 @@ public enum GridGgfsMode {
      * If secondary Hadoop file system is not configured, then this mode behaves like
      * {@link #PRIMARY} mode.
      *
-     * @see GridGgfsConfiguration#getSecondaryHadoopFileSystemUri()
+     * @see IgniteFsConfiguration#getSecondaryHadoopFileSystemUri()
      */
     DUAL_ASYNC;
 

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/cd01ed99/modules/core/src/main/java/org/gridgain/grid/ggfs/GridGgfsOutOfSpaceException.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/gridgain/grid/ggfs/GridGgfsOutOfSpaceException.java b/modules/core/src/main/java/org/gridgain/grid/ggfs/GridGgfsOutOfSpaceException.java
index 6c75f14..ce5ef3d 100644
--- a/modules/core/src/main/java/org/gridgain/grid/ggfs/GridGgfsOutOfSpaceException.java
+++ b/modules/core/src/main/java/org/gridgain/grid/ggfs/GridGgfsOutOfSpaceException.java
@@ -16,7 +16,7 @@ import org.jetbrains.annotations.*;
  * It is thrown when number of writes written to a {@code GGFS} data nodes exceeds
  * its maximum value (that is configured per-node).
  */
-public class GridGgfsOutOfSpaceException extends GridGgfsException {
+public class GridGgfsOutOfSpaceException extends IgniteFsException {
     /** */
     private static final long serialVersionUID = 0L;
 

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/cd01ed99/modules/core/src/main/java/org/gridgain/grid/ggfs/IgniteFsConcurrentModificationException.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/gridgain/grid/ggfs/IgniteFsConcurrentModificationException.java b/modules/core/src/main/java/org/gridgain/grid/ggfs/IgniteFsConcurrentModificationException.java
new file mode 100644
index 0000000..fa17228
--- /dev/null
+++ b/modules/core/src/main/java/org/gridgain/grid/ggfs/IgniteFsConcurrentModificationException.java
@@ -0,0 +1,28 @@
+/* @java.file.header */
+
+/*  _________        _____ __________________        _____
+ *  __  ____/___________(_)______  /__  ____/______ ____(_)_______
+ *  _  / __  __  ___/__  / _  __  / _  / __  _  __ `/__  / __  __ \
+ *  / /_/ /  _  /    _  /  / /_/ /  / /_/ /  / /_/ / _  /  _  / / /
+ *  \____/   /_/     /_/   \_,__/   \____/   \__,_/  /_/   /_/ /_/
+ */
+
+package org.gridgain.grid.ggfs;
+
+/**
+ * {@code GGFS} exception indicating that file system structure was modified concurrently. This error
+ * indicates that an operation performed in DUAL mode cannot proceed due to these changes.
+ */
+public class IgniteFsConcurrentModificationException extends IgniteFsException {
+    /** */
+    private static final long serialVersionUID = 0L;
+
+    /**
+     * Creates new exception.
+     *
+     * @param path Affected path.
+     */
+    public IgniteFsConcurrentModificationException(IgniteFsPath path) {
+        super("File system entry has been modified concurrently: " + path, null);
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/cd01ed99/modules/core/src/main/java/org/gridgain/grid/ggfs/IgniteFsConfiguration.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/gridgain/grid/ggfs/IgniteFsConfiguration.java b/modules/core/src/main/java/org/gridgain/grid/ggfs/IgniteFsConfiguration.java
new file mode 100644
index 0000000..670a3a2
--- /dev/null
+++ b/modules/core/src/main/java/org/gridgain/grid/ggfs/IgniteFsConfiguration.java
@@ -0,0 +1,801 @@
+/* @java.file.header */
+
+/*  _________        _____ __________________        _____
+ *  __  ____/___________(_)______  /__  ____/______ ____(_)_______
+ *  _  / __  __  ___/__  / _  __  / _  / __  _  __ `/__  / __  __ \
+ *  / /_/ /  _  /    _  /  / /_/ /  / /_/ /  / /_/ / _  /  _  / / /
+ *  \____/   /_/     /_/   \_,__/   \____/   \__,_/  /_/   /_/ /_/
+ */
+
+package org.gridgain.grid.ggfs;
+
+import org.gridgain.grid.ggfs.mapreduce.*;
+import org.gridgain.grid.util.typedef.internal.*;
+import org.jetbrains.annotations.*;
+
+import java.util.*;
+import java.util.concurrent.*;
+
+import static org.gridgain.grid.ggfs.GridGgfsMode.*;
+
+/**
+ * {@code GGFS} configuration. More than one file system can be configured within grid.
+ * {@code GGFS} configuration is provided via {@link org.apache.ignite.configuration.IgniteConfiguration#getGgfsConfiguration()}
+ * method.
+ * <p>
+ * Refer to {@code config/hadoop/default-config.xml} or {@code config/hadoop/default-config-client.xml}
+ * configuration files under GridGain installation to see sample {@code GGFS} configuration.
+ */
+public class IgniteFsConfiguration {
+    /** Default file system user name. */
+    public static final String DFLT_USER_NAME = System.getProperty("user.name", "anonymous");
+
+    /** Default IPC port. */
+    public static final int DFLT_IPC_PORT = 10500;
+
+    /** Default fragmentizer throttling block length. */
+    public static final long DFLT_FRAGMENTIZER_THROTTLING_BLOCK_LENGTH = 16 * 1024 * 1024;
+
+    /** Default fragmentizer throttling delay. */
+    public static final long DFLT_FRAGMENTIZER_THROTTLING_DELAY = 200;
+
+    /** Default fragmentizer concurrent files. */
+    public static final int DFLT_FRAGMENTIZER_CONCURRENT_FILES = 0;
+
+    /** Default fragmentizer local writes ratio. */
+    public static final float DFLT_FRAGMENTIZER_LOCAL_WRITES_RATIO = 0.8f;
+
+    /** Fragmentizer enabled property. */
+    public static final boolean DFLT_FRAGMENTIZER_ENABLED = true;
+
+    /** Default batch size for logging. */
+    public static final int DFLT_GGFS_LOG_BATCH_SIZE = 100;
+
+    /** Default {@code GGFS} log directory. */
+    public static final String DFLT_GGFS_LOG_DIR = "work/ggfs/log";
+
+    /** Default per node buffer size. */
+    public static final int DFLT_PER_NODE_BATCH_SIZE = 100;
+
+    /** Default number of per node parallel operations. */
+    public static final int DFLT_PER_NODE_PARALLEL_BATCH_CNT = 8;
+
+    /** Default GGFS mode. */
+    public static final GridGgfsMode DFLT_MODE = DUAL_ASYNC;
+
+    /** Default file's data block size (bytes). */
+    public static final int DFLT_BLOCK_SIZE = 1 << 16;
+
+    /** Default read/write buffers size (bytes). */
+    public static final int DFLT_BUF_SIZE = 1 << 16;
+
+    /** Default trash directory purge await timeout in case data cache oversize is detected. */
+    public static final long DFLT_TRASH_PURGE_TIMEOUT = 1000;
+
+    /** Default management port. */
+    public static final int DFLT_MGMT_PORT = 11400;
+
+    /** Default IPC endpoint enabled flag. */
+    public static final boolean DFLT_IPC_ENDPOINT_ENABLED = true;
+
+    /** GGFS instance name. */
+    private String name;
+
+    /** Cache name to store GGFS meta information. */
+    private String metaCacheName;
+
+    /** Cache name to store file's data blocks. */
+    private String dataCacheName;
+
+    /** File's data block size (bytes). */
+    private int blockSize = DFLT_BLOCK_SIZE;
+
+    /** The number of pre-fetched blocks if a specific file's chunk is requested. */
+    private int prefetchBlocks;
+
+    /** Amount of sequential block reads before prefetch is triggered. */
+    private int seqReadsBeforePrefetch;
+
+    /** Read/write buffers size for stream operations (bytes). */
+    private int bufSize = DFLT_BUF_SIZE;
+
+    /** Per node buffer size. */
+    private int perNodeBatchSize = DFLT_PER_NODE_BATCH_SIZE;
+
+    /** Per node parallel operations. */
+    private int perNodeParallelBatchCnt = DFLT_PER_NODE_PARALLEL_BATCH_CNT;
+
+    /** IPC endpoint properties to publish GGFS over. */
+    private Map<String, String> ipcEndpointCfg;
+
+    /** IPC endpoint enabled flag. */
+    private boolean ipcEndpointEnabled = DFLT_IPC_ENDPOINT_ENABLED;
+
+    /** Management port. */
+    private int mgmtPort = DFLT_MGMT_PORT;
+
+    /** Secondary file system. */
+    private GridGgfsFileSystem secondaryFs;
+
+    /** GGFS mode. */
+    private GridGgfsMode dfltMode = DFLT_MODE;
+
+    /** Fragmentizer throttling block length. */
+    private long fragmentizerThrottlingBlockLen = DFLT_FRAGMENTIZER_THROTTLING_BLOCK_LENGTH;
+
+    /** Fragmentizer throttling delay. */
+    private long fragmentizerThrottlingDelay = DFLT_FRAGMENTIZER_THROTTLING_DELAY;
+
+    /** Fragmentizer concurrent files. */
+    private int fragmentizerConcurrentFiles = DFLT_FRAGMENTIZER_CONCURRENT_FILES;
+
+    /** Fragmentizer local writes ratio. */
+    private float fragmentizerLocWritesRatio = DFLT_FRAGMENTIZER_LOCAL_WRITES_RATIO;
+
+    /** Fragmentizer enabled flag. */
+    private boolean fragmentizerEnabled = DFLT_FRAGMENTIZER_ENABLED;
+
+    /** Path modes. */
+    private Map<String, GridGgfsMode> pathModes;
+
+    /** Maximum space. */
+    private long maxSpace;
+
+    /** Trash purge await timeout. */
+    private long trashPurgeTimeout = DFLT_TRASH_PURGE_TIMEOUT;
+
+    /** Dual mode PUT operations executor service. */
+    private ExecutorService dualModePutExec;
+
+    /** Dual mode PUT operations executor service shutdown flag. */
+    private boolean dualModePutExecShutdown;
+
+    /** Maximum amount of data in pending puts. */
+    private long dualModeMaxPendingPutsSize;
+
+    /** Maximum range length. */
+    private long maxTaskRangeLen;
+
+    /**
+     * Constructs default configuration.
+     */
+    public IgniteFsConfiguration() {
+        // No-op.
+    }
+
+    /**
+     * Constructs the copy of the configuration.
+     *
+     * @param cfg Configuration to copy.
+     */
+    public IgniteFsConfiguration(IgniteFsConfiguration cfg) {
+        assert cfg != null;
+
+        /*
+         * Must preserve alphabetical order!
+         */
+        blockSize = cfg.getBlockSize();
+        bufSize = cfg.getStreamBufferSize();
+        dataCacheName = cfg.getDataCacheName();
+        dfltMode = cfg.getDefaultMode();
+        dualModeMaxPendingPutsSize = cfg.getDualModeMaxPendingPutsSize();
+        dualModePutExec = cfg.getDualModePutExecutorService();
+        dualModePutExecShutdown = cfg.getDualModePutExecutorServiceShutdown();
+        fragmentizerConcurrentFiles = cfg.getFragmentizerConcurrentFiles();
+        fragmentizerEnabled = cfg.isFragmentizerEnabled();
+        fragmentizerLocWritesRatio = cfg.getFragmentizerLocalWritesRatio();
+        fragmentizerThrottlingBlockLen = cfg.getFragmentizerThrottlingBlockLength();
+        fragmentizerThrottlingDelay = cfg.getFragmentizerThrottlingDelay();
+        ipcEndpointCfg = cfg.getIpcEndpointConfiguration();
+        ipcEndpointEnabled = cfg.isIpcEndpointEnabled();
+        maxSpace = cfg.getMaxSpaceSize();
+        maxTaskRangeLen = cfg.getMaximumTaskRangeLength();
+        metaCacheName = cfg.getMetaCacheName();
+        mgmtPort = cfg.getManagementPort();
+        name = cfg.getName();
+        pathModes = cfg.getPathModes();
+        perNodeBatchSize = cfg.getPerNodeBatchSize();
+        perNodeParallelBatchCnt = cfg.getPerNodeParallelBatchCount();
+        prefetchBlocks = cfg.getPrefetchBlocks();
+        secondaryFs = cfg.getSecondaryFileSystem();
+        seqReadsBeforePrefetch = cfg.getSequentialReadsBeforePrefetch();
+        trashPurgeTimeout = cfg.getTrashPurgeTimeout();
+    }
+
+    /**
+     * Gets GGFS instance name. If {@code null}, then instance with default
+     * name will be used.
+     *
+     * @return GGFS instance name.
+     */
+    @Nullable public String getName() {
+        return name;
+    }
+
+    /**
+     * Sets GGFS instance name.
+     *
+     * @param name GGFS instance name.
+     */
+    public void setName(String name) {
+        this.name = name;
+    }
+
+    /**
+     * Cache name to store GGFS meta information. If {@code null}, then instance
+     * with default meta-cache name will be used.
+     *
+     * @return Cache name to store GGFS meta information.
+     */
+    @Nullable public String getMetaCacheName() {
+        return metaCacheName;
+    }
+
+    /**
+     * Sets cache name to store GGFS meta information.
+     *
+     * @param metaCacheName Cache name to store GGFS meta information.
+     */
+    public void setMetaCacheName(String metaCacheName) {
+        this.metaCacheName = metaCacheName;
+    }
+
+    /**
+     * Cache name to store GGFS data.
+     *
+     * @return Cache name to store GGFS data.
+     */
+    @Nullable public String getDataCacheName() {
+        return dataCacheName;
+    }
+
+    /**
+     * Sets cache name to store GGFS data.
+     *
+     * @param dataCacheName Cache name to store GGFS data.
+     */
+    public void setDataCacheName(String dataCacheName) {
+        this.dataCacheName = dataCacheName;
+    }
+
+    /**
+     * Gets file's data block size.
+     *
+     * @return File's data block size.
+     */
+    public int getBlockSize() {
+        return blockSize;
+    }
+
+    /**
+     * Sets file's data block size.
+     *
+     * @param blockSize File's data block size (bytes) or {@code 0} to reset default value.
+     */
+    public void setBlockSize(int blockSize) {
+        A.ensure(blockSize >= 0, "blockSize >= 0");
+
+        this.blockSize = blockSize == 0 ? DFLT_BLOCK_SIZE : blockSize;
+    }
+
+    /**
+     * Gets number of pre-fetched blocks if specific file's chunk is requested.
+     *
+     * @return The number of pre-fetched blocks.
+     */
+    public int getPrefetchBlocks() {
+        return prefetchBlocks;
+    }
+
+    /**
+     * Sets the number of pre-fetched blocks if specific file's chunk is requested.
+     *
+     * @param prefetchBlocks New number of pre-fetched blocks.
+     */
+    public void setPrefetchBlocks(int prefetchBlocks) {
+        A.ensure(prefetchBlocks >= 0, "prefetchBlocks >= 0");
+
+        this.prefetchBlocks = prefetchBlocks;
+    }
+
+    /**
+     * Gets amount of sequential block reads before prefetch is triggered. The
+     * higher this value, the longer GGFS will wait before starting to prefetch
+     * values ahead of time. Depending on the use case, this can either help
+     * or hurt performance.
+     * <p>
+     * Default is {@code 0} which means that pre-fetching will start right away.
+     * <h1 class="header">Integration With Hadoop</h1>
+     * This parameter can be also overridden for individual Hadoop MapReduce tasks by passing
+     * {@code org.gridgain.grid.ggfs.hadoop.GridGgfsHadoopParameters.PARAM_GGFS_SEQ_READS_BEFORE_PREFETCH}
+     * configuration property directly to Hadoop MapReduce task.
+     * <p>
+     * <b>NOTE:</b> Integration with Hadoop is available only in {@code In-Memory Accelerator For Hadoop} edition.
+     *
+     * @return Amount of sequential block reads.
+     */
+    public int getSequentialReadsBeforePrefetch() {
+        return seqReadsBeforePrefetch;
+    }
+
+    /**
+     * Sets amount of sequential block reads before prefetch is triggered. The
+     * higher this value, the longer GGFS will wait before starting to prefetch
+     * values ahead of time. Depending on the use case, this can either help
+     * or hurt performance.
+     * <p>
+     * Default is {@code 0} which means that pre-fetching will start right away.
+     * <h1 class="header">Integration With Hadoop</h1>
+     * This parameter can be also overridden for individual Hadoop MapReduce tasks by passing
+     * {@code org.gridgain.grid.ggfs.hadoop.GridGgfsHadoopParameters.PARAM_GGFS_SEQ_READS_BEFORE_PREFETCH}
+     * configuration property directly to Hadoop MapReduce task.
+     * <p>
+     * <b>NOTE:</b> Integration with Hadoop is available only in {@code In-Memory Accelerator For Hadoop} edition.
+     *
+     * @param seqReadsBeforePrefetch Amount of sequential block reads before prefetch is triggered.
+     */
+    public void setSequentialReadsBeforePrefetch(int seqReadsBeforePrefetch) {
+        A.ensure(seqReadsBeforePrefetch >= 0, "seqReadsBeforePrefetch >= 0");
+
+        this.seqReadsBeforePrefetch = seqReadsBeforePrefetch;
+    }
+
+    /**
+     * Gets read/write buffer size for {@code GGFS} stream operations in bytes.
+     *
+     * @return Read/write buffers size (bytes).
+     */
+    public int getStreamBufferSize() {
+        return bufSize;
+    }
+
+    /**
+     * Sets read/write buffers size for {@code GGFS} stream operations (bytes).
+     *
+     * @param bufSize Read/write buffers size for stream operations (bytes) or {@code 0} to reset default value.
+     */
+    public void setStreamBufferSize(int bufSize) {
+        A.ensure(bufSize >= 0, "bufSize >= 0");
+
+        this.bufSize = bufSize == 0 ? DFLT_BUF_SIZE : bufSize;
+    }
+
+    /**
+     * Gets number of file blocks buffered on local node before sending batch to remote node.
+     *
+     * @return Per node buffer size.
+     */
+    public int getPerNodeBatchSize() {
+        return perNodeBatchSize;
+    }
+
+    /**
+     * Sets number of file blocks collected on local node before sending batch to remote node.
+     *
+     * @param perNodeBatchSize Per node buffer size.
+     */
+    public void setPerNodeBatchSize(int perNodeBatchSize) {
+        this.perNodeBatchSize = perNodeBatchSize;
+    }
+
+    /**
+     * Gets number of batches that can be concurrently sent to remote node.
+     *
+     * @return Number of batches for each node.
+     */
+    public int getPerNodeParallelBatchCount() {
+        return perNodeParallelBatchCnt;
+    }
+
+    /**
+     * Sets number of file block batches that can be concurrently sent to remote node.
+     *
+     * @param perNodeParallelBatchCnt Per node parallel load operations.
+     */
+    public void setPerNodeParallelBatchCount(int perNodeParallelBatchCnt) {
+        this.perNodeParallelBatchCnt = perNodeParallelBatchCnt;
+    }
+
+    /**
+     * Gets map of IPC endpoint configuration properties. There are 2 different
+     * types of endpoint supported: {@code shared-memory}, and {@code TCP}.
+     * <p>
+     * The following configuration properties are supported for {@code shared-memory}
+     * endpoint:
+     * <ul>
+     *     <li>{@code type} - value is {@code shmem} to specify {@code shared-memory} approach.</li>
+     *     <li>{@code port} - endpoint port.</li>
+     *     <li>{@code size} - memory size allocated for single endpoint communication.</li>
+     *     <li>
+     *         {@code tokenDirectoryPath} - path, either absolute or relative to {@code GRIDGAIN_HOME} to
+     *         store shared memory tokens.
+     *     </li>
+     * </ul>
+     * <p>
+     * The following configuration properties are supported for {@code TCP} approach:
+     * <ul>
+     *     <li>{@code type} - value is {@code tcp} to specify {@code TCP} approach.</li>
+     *     <li>{@code port} - endpoint bind port.</li>
+     *     <li>
+     *         {@code host} - endpoint bind host. If omitted '127.0.0.1' will be used.
+     *     </li>
+     * </ul>
+     * <p>
+     * Note that {@code shared-memory} approach is not supported on Windows environments.
+     * In case GGFS is failed to bind to particular port, further attempts will be performed every 3 seconds.
+     *
+     * @return Map of IPC endpoint configuration properties. In case the value is not set, defaults will be used. Default
+     * type for Windows is "tcp", for all other platforms - "shmem". Default port is {@link #DFLT_IPC_PORT}.
+     */
+    @Nullable public Map<String,String> getIpcEndpointConfiguration() {
+        return ipcEndpointCfg;
+    }
+
+    /**
+     * Sets IPC endpoint configuration to publish GGFS over.
+     *
+     * @param ipcEndpointCfg Map of IPC endpoint config properties.
+     */
+    public void setIpcEndpointConfiguration(@Nullable Map<String,String> ipcEndpointCfg) {
+        this.ipcEndpointCfg = ipcEndpointCfg;
+    }
+
+    /**
+     * Gets IPC endpoint enabled flag. In case it is set to {@code true} endpoint will be created and bound to specific
+     * port. Otherwise endpoint will not be created. Default value is {@link #DFLT_IPC_ENDPOINT_ENABLED}.
+     *
+     * @return {@code True} in case endpoint is enabled.
+     */
+    public boolean isIpcEndpointEnabled() {
+        return ipcEndpointEnabled;
+    }
+
+    /**
+     * Sets IPC endpoint enabled flag. See {@link #isIpcEndpointEnabled()}.
+     *
+     * @param ipcEndpointEnabled IPC endpoint enabled flag.
+     */
+    public void setIpcEndpointEnabled(boolean ipcEndpointEnabled) {
+        this.ipcEndpointEnabled = ipcEndpointEnabled;
+    }
+
+    /**
+     * Gets port number for management endpoint. All GGFS nodes should have this port open
+     * for Visor Management Console to work with GGFS.
+     * <p>
+     * Default value is {@link #DFLT_MGMT_PORT}
+     *
+     * @return Port number or {@code -1} if management endpoint should be disabled.
+     */
+    public int getManagementPort() {
+        return mgmtPort;
+    }
+
+    /**
+     * Sets management endpoint port.
+     *
+     * @param mgmtPort port number or {@code -1} to disable management endpoint.
+     */
+    public void setManagementPort(int mgmtPort) {
+        this.mgmtPort = mgmtPort;
+    }
+
+    /**
+     * Gets mode to specify how {@code GGFS} interacts with Hadoop file system, like {@code HDFS}.
+     * Secondary Hadoop file system is provided for pass-through, write-through, and read-through
+     * purposes.
+     * <p>
+     * Default mode is {@link GridGgfsMode#DUAL_ASYNC}. If secondary Hadoop file system is
+     * not configured, this mode will work just like {@link GridGgfsMode#PRIMARY} mode.
+     *
+     * @return Mode to specify how GGFS interacts with secondary HDFS file system.
+     */
+    public GridGgfsMode getDefaultMode() {
+        return dfltMode;
+    }
+
+    /**
+     * Sets {@code GGFS} mode to specify how it should interact with secondary
+     * Hadoop file system, like {@code HDFS}. Secondary Hadoop file system is provided
+     * for pass-through, write-through, and read-through purposes.
+     *
+     * @param dfltMode {@code GGFS} mode.
+     */
+    public void setDefaultMode(GridGgfsMode dfltMode) {
+        this.dfltMode = dfltMode;
+    }
+
+    /**
+     * Gets the secondary file system. Secondary file system is provided for pass-through, write-through,
+     * and read-through purposes.
+     *
+     * @return Secondary file system.
+     */
+    public GridGgfsFileSystem getSecondaryFileSystem() {
+        return secondaryFs;
+    }
+
+    /**
+     * Sets the secondary file system. Secondary file system is provided for pass-through, write-through,
+     * and read-through purposes.
+     *
+     * @param fileSystem Secondary file system.
+     */
+    public void setSecondaryFileSystem(GridGgfsFileSystem fileSystem) {
+        secondaryFs = fileSystem;
+    }
+
+    /**
+     * Gets map of path prefixes to {@code GGFS} modes used for them.
+     * <p>
+     * If path doesn't correspond to any specified prefix or mappings are not provided, then
+     * {@link #getDefaultMode()} is used.
+     * <p>
+     * Several folders under {@code '/gridgain'} folder have predefined mappings which cannot be overridden.
+     * <li>{@code /gridgain/primary} and all it's sub-folders will always work in {@code PRIMARY} mode.</li>
+     * <p>
+     * And in case secondary file system URI is provided:
+     * <li>{@code /gridgain/proxy} and all it's sub-folders will always work in {@code PROXY} mode.</li>
+     * <li>{@code /gridgain/sync} and all it's sub-folders will always work in {@code DUAL_SYNC} mode.</li>
+     * <li>{@code /gridgain/async} and all it's sub-folders will always work in {@code DUAL_ASYNC} mode.</li>
+     *
+     * @return Map of paths to {@code GGFS} modes.
+     */
+    @Nullable public Map<String, GridGgfsMode> getPathModes() {
+        return pathModes;
+    }
+
+    /**
+     * Sets map of path prefixes to {@code GGFS} modes used for them.
+     * <p>
+     * If path doesn't correspond to any specified prefix or mappings are not provided, then
+     * {@link #getDefaultMode()} is used.
+     *
+     * @param pathModes Map of paths to {@code GGFS} modes.
+     */
+    public void setPathModes(Map<String, GridGgfsMode> pathModes) {
+        this.pathModes = pathModes;
+    }
+
+    /**
+     * Gets the length of file chunk to send before delaying the fragmentizer.
+     *
+     * @return File chunk length in bytes.
+     */
+    public long getFragmentizerThrottlingBlockLength() {
+        return fragmentizerThrottlingBlockLen;
+    }
+
+    /**
+     * Sets length of file chunk to transmit before throttling is delayed.
+     *
+     * @param fragmentizerThrottlingBlockLen Block length in bytes.
+     */
+    public void setFragmentizerThrottlingBlockLength(long fragmentizerThrottlingBlockLen) {
+        this.fragmentizerThrottlingBlockLen = fragmentizerThrottlingBlockLen;
+    }
+
+    /**
+     * Gets throttle delay for fragmentizer.
+     *
+     * @return Throttle delay in milliseconds.
+     */
+    public long getFragmentizerThrottlingDelay() {
+        return fragmentizerThrottlingDelay;
+    }
+
+    /**
+     * Sets delay in milliseconds for which fragmentizer is paused.
+     *
+     * @param fragmentizerThrottlingDelay Delay in milliseconds.
+     */
+    public void setFragmentizerThrottlingDelay(long fragmentizerThrottlingDelay) {
+        this.fragmentizerThrottlingDelay = fragmentizerThrottlingDelay;
+    }
+
+    /**
+     * Gets number of files that can be processed by fragmentizer concurrently.
+     *
+     * @return Number of files to process concurrently.
+     */
+    public int getFragmentizerConcurrentFiles() {
+        return fragmentizerConcurrentFiles;
+    }
+
+    /**
+     * Sets number of files to process concurrently by fragmentizer.
+     *
+     * @param fragmentizerConcurrentFiles Number of files to process concurrently.
+     */
+    public void setFragmentizerConcurrentFiles(int fragmentizerConcurrentFiles) {
+        this.fragmentizerConcurrentFiles = fragmentizerConcurrentFiles;
+    }
+
+    /**
+     * Gets amount of local memory (in % of local GGFS max space size) available for local writes
+     * during file creation.
+     * <p>
+     * If current GGFS space size is less than {@code fragmentizerLocalWritesRatio * maxSpaceSize},
+     * then file blocks will be written to the local node first and then asynchronously distributed
+     * among cluster nodes (fragmentized).
+     * <p>
+     * Default value is {@link #DFLT_FRAGMENTIZER_LOCAL_WRITES_RATIO}.
+     *
+     * @return Ratio for local writes space.
+     */
+    public float getFragmentizerLocalWritesRatio() {
+        return fragmentizerLocWritesRatio;
+    }
+
+    /**
+     * Sets ratio for space available for local file writes.
+     *
+     * @param fragmentizerLocWritesRatio Ratio for local file writes.
+     * @see #getFragmentizerLocalWritesRatio()
+     */
+    public void setFragmentizerLocalWritesRatio(float fragmentizerLocWritesRatio) {
+        this.fragmentizerLocWritesRatio = fragmentizerLocWritesRatio;
+    }
+
+    /**
+     * Gets flag indicating whether GGFS fragmentizer is enabled. If fragmentizer is disabled, files will be
+     * written in distributed fashion.
+     *
+     * @return Flag indicating whether fragmentizer is enabled.
+     */
+    public boolean isFragmentizerEnabled() {
+        return fragmentizerEnabled;
+    }
+
+    /**
+     * Sets property indicating whether fragmentizer is enabled.
+     *
+     * @param fragmentizerEnabled {@code True} if fragmentizer is enabled.
+     */
+    public void setFragmentizerEnabled(boolean fragmentizerEnabled) {
+        this.fragmentizerEnabled = fragmentizerEnabled;
+    }
+
+    /**
+     * Gets maximum space available for data cache to store file system entries.
+     *
+     * @return Maximum space available for data cache.
+     */
+    public long getMaxSpaceSize() {
+        return maxSpace;
+    }
+
+    /**
+     * Sets maximum space in bytes available in data cache.
+     *
+     * @param maxSpace Maximum space available in data cache.
+     */
+    public void setMaxSpaceSize(long maxSpace) {
+        this.maxSpace = maxSpace;
+    }
+
+    /**
+     * Gets maximum timeout awaiting for trash purging in case data cache oversize is detected.
+     *
+     * @return Maximum timeout awaiting for trash purging in case data cache oversize is detected.
+     */
+    public long getTrashPurgeTimeout() {
+        return trashPurgeTimeout;
+    }
+
+    /**
+     * Sets maximum timeout awaiting for trash purging in case data cache oversize is detected.
+     *
+     * @param trashPurgeTimeout Maximum timeout awaiting for trash purging in case data cache oversize is detected.
+     */
+    public void setTrashPurgeTimeout(long trashPurgeTimeout) {
+        this.trashPurgeTimeout = trashPurgeTimeout;
+    }
+
+    /**
+     * Gets DUAL mode put operation executor service. This executor service will process cache PUT requests for
+     * data which came from the secondary file system and about to be written to GGFS data cache.
+     * In case no executor service is provided, default one will be created with maximum amount of threads equals
+     * to amount of processor cores.
+     *
+     * @return DUAL mode put operations executor service.
+     */
+    @Nullable public ExecutorService getDualModePutExecutorService() {
+        return dualModePutExec;
+    }
+
+    /**
+     * Sets DUAL mode put operations executor service.
+     *
+     * @param dualModePutExec Dual mode put operations executor service.
+     */
+    public void setDualModePutExecutorService(ExecutorService dualModePutExec) {
+        this.dualModePutExec = dualModePutExec;
+    }
+
+    /**
+     * Gets DUAL mode put operation executor service shutdown flag.
+     *
+     * @return DUAL mode put operation executor service shutdown flag.
+     */
+    public boolean getDualModePutExecutorServiceShutdown() {
+        return dualModePutExecShutdown;
+    }
+
+    /**
+     * Sets DUAL mode put operations executor service shutdown flag.
+     *
+     * @param dualModePutExecShutdown Dual mode put operations executor service shutdown flag.
+     */
+    public void setDualModePutExecutorServiceShutdown(boolean dualModePutExecShutdown) {
+        this.dualModePutExecShutdown = dualModePutExecShutdown;
+    }
+
+    /**
+     * Gets maximum amount of pending data read from the secondary file system and waiting to be written to data
+     * cache. {@code 0} or negative value stands for unlimited size.
+     * <p>
+     * By default this value is set to {@code 0}. It is recommended to set positive value in case your
+     * application performs frequent reads of large amount of data from the secondary file system in order to
+     * avoid issues with increasing GC pauses or out-of-memory error.
+     *
+     * @return Maximum amount of pending data read from the secondary file system.
+     */
+    public long getDualModeMaxPendingPutsSize() {
+        return dualModeMaxPendingPutsSize;
+    }
+
+    /**
+     * Sets maximum amount of data in pending put operations.
+     *
+     * @param dualModeMaxPendingPutsSize Maximum amount of data in pending put operations.
+     */
+    public void setDualModeMaxPendingPutsSize(long dualModeMaxPendingPutsSize) {
+        this.dualModeMaxPendingPutsSize = dualModeMaxPendingPutsSize;
+    }
+
+    /**
+     * Gets maximum default range size of a file being split during GGFS task execution. When GGFS task is about to
+     * be executed, it requests file block locations first. Each location is defined as {@link GridGgfsFileRange} which
+     * has length. In case this parameter is set to positive value, then GGFS will split single file range into smaller
+     * ranges with length not greater that this parameter. The only exception to this case is when maximum task range
+     * length is smaller than file block size. In this case maximum task range size will be overridden and set to file
+     * block size.
+     * <p>
+     * Note that this parameter is applied when task is split into jobs before {@link GridGgfsRecordResolver} is
+     * applied. Therefore, final file ranges being assigned to particular jobs could be greater than value of this
+     * parameter depending on file data layout and selected resolver type.
+     * <p>
+     * Setting this parameter might be useful when file is highly colocated and have very long consequent data chunks
+     * so that task execution suffers from insufficient parallelism. E.g., in case you have one GGFS node in topology
+     * and want to process 1Gb file, then only single range of length 1Gb will be returned. This will result in
+     * a single job which will be processed in one thread. But in case you provide this configuration parameter and set
+     * maximum range length to 16Mb, then 64 ranges will be returned resulting in 64 jobs which could be executed in
+     * parallel.
+     * <p>
+     * Note that some {@code GridGgfs.execute()} methods can override value of this parameter.
+     * <p>
+     * In case value of this parameter is set to {@code 0} or negative value, it is simply ignored. Default value is
+     * {@code 0}.
+     *
+     * @return Maximum range size of a file being split during GGFS task execution.
+     */
+    public long getMaximumTaskRangeLength() {
+        return maxTaskRangeLen;
+    }
+
+    /**
+     * Sets maximum default range size of a file being split during GGFS task execution.
+     * See {@link #getMaximumTaskRangeLength()} for more details.
+     *
+     * @param maxTaskRangeLen Maximum default range size of a file being split during GGFS task execution.
+     */
+    public void setMaximumTaskRangeLength(long maxTaskRangeLen) {
+        this.maxTaskRangeLen = maxTaskRangeLen;
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(IgniteFsConfiguration.class, this);
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/cd01ed99/modules/core/src/main/java/org/gridgain/grid/ggfs/IgniteFsException.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/gridgain/grid/ggfs/IgniteFsException.java b/modules/core/src/main/java/org/gridgain/grid/ggfs/IgniteFsException.java
new file mode 100644
index 0000000..d72ed40
--- /dev/null
+++ b/modules/core/src/main/java/org/gridgain/grid/ggfs/IgniteFsException.java
@@ -0,0 +1,49 @@
+/* @java.file.header */
+
+/*  _________        _____ __________________        _____
+ *  __  ____/___________(_)______  /__  ____/______ ____(_)_______
+ *  _  / __  __  ___/__  / _  __  / _  / __  _  __ `/__  / __  __ \
+ *  / /_/ /  _  /    _  /  / /_/ /  / /_/ /  / /_/ / _  /  _  / / /
+ *  \____/   /_/     /_/   \_,__/   \____/   \__,_/  /_/   /_/ /_/
+ */
+
+package org.gridgain.grid.ggfs;
+
+import org.gridgain.grid.*;
+import org.jetbrains.annotations.*;
+
+/**
+ * {@code GGFS} exception thrown by file system components.
+ */
+public class IgniteFsException extends GridException {
+    /** */
+    private static final long serialVersionUID = 0L;
+
+    /**
+     * Creates an instance of GGFS exception with descriptive error message.
+     *
+     * @param msg Error message.
+     */
+    public IgniteFsException(String msg) {
+        super(msg);
+    }
+
+    /**
+     * Creates an instance of GGFS exception caused by nested exception.
+     *
+     * @param cause Exception cause.
+     */
+    public IgniteFsException(Throwable cause) {
+        super(cause);
+    }
+
+    /**
+     * Creates an instance of GGFS exception with error message and underlying cause.
+     *
+     * @param msg Error message.
+     * @param cause Exception cause (may be {@code null}).
+     */
+    public IgniteFsException(String msg, @Nullable Throwable cause) {
+        super(msg, cause);
+    }
+}


Mime
View raw message