ignite-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From voze...@apache.org
Subject [54/57] [abbrv] incubator-ignite git commit: # IGNITE-226: WIP (13)
Date Fri, 13 Feb 2015 10:55:04 GMT
# IGNITE-226: WIP (13)


Project: http://git-wip-us.apache.org/repos/asf/incubator-ignite/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-ignite/commit/88bf1443
Tree: http://git-wip-us.apache.org/repos/asf/incubator-ignite/tree/88bf1443
Diff: http://git-wip-us.apache.org/repos/asf/incubator-ignite/diff/88bf1443

Branch: refs/heads/ignite-226
Commit: 88bf1443a196a330a372d03778bcaa6f3a861892
Parents: 5f85d98
Author: vozerov-gridgain <vozerov@gridgain.com>
Authored: Fri Feb 13 13:15:34 2015 +0300
Committer: vozerov-gridgain <vozerov@gridgain.com>
Committed: Fri Feb 13 13:15:34 2015 +0300

----------------------------------------------------------------------
 examples/config/filesystem/example-ignitefs.xml |   2 +-
 .../ignite/examples/ignitefs/IgfsExample.java   | 278 +++++++
 .../examples/ignitefs/IgfsMapReduceExample.java | 249 ++++++
 .../examples/ignitefs/IgfsNodeStartup.java      |  41 +
 .../examples/ignitefs/IgniteFsExample.java      | 278 -------
 .../ignitefs/IgniteFsMapReduceExample.java      | 249 ------
 .../examples/ignitefs/IgniteFsNodeStartup.java  |  41 -
 .../ignite/examples/IgfsExamplesSelfTest.java   |   2 +-
 .../src/main/java/org/apache/ignite/Ignite.java |  12 +-
 .../main/java/org/apache/ignite/IgniteFs.java   |   6 +-
 .../ignite/configuration/IgfsConfiguration.java | 807 +++++++++++++++++++
 .../configuration/IgniteConfiguration.java      |   6 +-
 .../configuration/IgniteFsConfiguration.java    | 807 -------------------
 .../org/apache/ignite/events/EventType.java     |  26 +-
 .../org/apache/ignite/events/IgfsEvent.java     | 197 +++++
 .../org/apache/ignite/events/IgniteFsEvent.java | 197 -----
 .../org/apache/ignite/ignitefs/IgfsMetrics.java |   2 +-
 .../org/apache/ignite/ignitefs/IgfsMode.java    |   8 +-
 .../ignite/ignitefs/mapreduce/IgfsTask.java     |   2 +-
 .../ignite/ignitefs/mapreduce/IgfsTaskArgs.java |   2 +-
 .../org/apache/ignite/internal/IgnitionEx.java  |   6 +-
 .../processors/cache/GridCacheAdapter.java      |   4 +-
 .../processors/cache/GridCacheProcessor.java    |   4 +-
 .../processors/cache/GridCacheUtils.java        |   4 +-
 .../internal/processors/fs/IgfsAsyncImpl.java   |   2 +-
 .../internal/processors/fs/IgfsContext.java     |   6 +-
 .../internal/processors/fs/IgfsDataManager.java |   2 +-
 .../processors/fs/IgfsDeleteWorker.java         |   2 +-
 .../internal/processors/fs/IgfsFileInfo.java    |   4 +-
 .../ignite/internal/processors/fs/IgfsImpl.java |  40 +-
 .../internal/processors/fs/IgfsMetaManager.java |  28 +-
 .../internal/processors/fs/IgfsProcessor.java   |  12 +-
 .../processors/fs/IgfsServerManager.java        |   4 +-
 .../visor/node/VisorGgfsConfiguration.java      |   6 +-
 .../internal/visor/util/VisorTaskUtils.java     |   2 +-
 modules/core/src/test/config/ggfs-loopback.xml  |   2 +-
 modules/core/src/test/config/ggfs-shmem.xml     |   2 +-
 .../ignitefs/IgfsEventsAbstractSelfTest.java    | 112 +--
 .../IgfsFragmentizerAbstractSelfTest.java       |   2 +-
 ...heIgfsPerBlockLruEvictionPolicySelfTest.java |   4 +-
 .../processors/fs/IgfsAbstractSelfTest.java     |   2 +-
 .../processors/fs/IgfsCacheSelfTest.java        |   2 +-
 .../processors/fs/IgfsDataManagerSelfTest.java  |   2 +-
 .../processors/fs/IgfsMetaManagerSelfTest.java  |   2 +-
 .../processors/fs/IgfsMetricsSelfTest.java      |   4 +-
 .../processors/fs/IgfsModesSelfTest.java        |   4 +-
 .../processors/fs/IgfsProcessorSelfTest.java    |   4 +-
 .../fs/IgfsProcessorValidationSelfTest.java     |  32 +-
 ...IpcEndpointRegistrationAbstractSelfTest.java |   6 +-
 ...dpointRegistrationOnLinuxAndMacSelfTest.java |   2 +-
 .../processors/fs/IgfsSizeSelfTest.java         |   2 +-
 .../processors/fs/IgfsStreamsSelfTest.java      |   2 +-
 .../processors/fs/IgfsTaskSelfTest.java         |   2 +-
 .../IgfsAbstractRecordResolverSelfTest.java     |   2 +-
 .../ipc/shmem/IpcSharedMemoryNodeStartup.java   |   2 +-
 .../ignitefs/hadoop/IgfsHadoopParameters.java   |   2 +-
 .../hadoop/v1/IgfsHadoopFileSystem.java         |   2 +-
 .../hadoop/v2/IgfsHadoopFileSystem.java         |   2 +-
 .../internal/fs/hadoop/IgfsHadoopEndpoint.java  |   2 +-
 .../fs/GridHadoopDistributedFileSystem.java     |   2 +-
 .../ignite/ignitefs/IgfsEventsTestSuite.java    |  40 +-
 .../IgfsHadoop20FileSystemAbstractSelfTest.java |   8 +-
 .../IgfsHadoopDualAbstractSelfTest.java         |   2 +-
 .../IgfsHadoopFileSystemAbstractSelfTest.java   |   8 +-
 .../IgfsHadoopFileSystemClientSelfTest.java     |   2 +-
 .../IgfsHadoopFileSystemHandshakeSelfTest.java  |   2 +-
 .../IgfsHadoopFileSystemIpcCacheSelfTest.java   |   4 +-
 ...IgfsHadoopFileSystemLoggerStateSelfTest.java |   2 +-
 ...fsHadoopFileSystemSecondaryModeSelfTest.java |   4 +-
 .../ignitefs/IgfsNearOnlyMultiNodeSelfTest.java |   2 +-
 .../hadoop/GridHadoopAbstractSelfTest.java      |   4 +-
 ...idHadoopDefaultMapReducePlannerSelfTest.java |   2 +-
 .../hadoop/GridHadoopTaskExecutionSelfTest.java |   4 +-
 73 files changed, 1812 insertions(+), 1812 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/88bf1443/examples/config/filesystem/example-ignitefs.xml
----------------------------------------------------------------------
diff --git a/examples/config/filesystem/example-ignitefs.xml b/examples/config/filesystem/example-ignitefs.xml
index 8ffb278..8cd72a3 100644
--- a/examples/config/filesystem/example-ignitefs.xml
+++ b/examples/config/filesystem/example-ignitefs.xml
@@ -65,7 +65,7 @@
 
         <property name="ggfsConfiguration">
             <list>
-                <bean class="org.apache.ignite.configuration.IgniteFsConfiguration">
+                <bean class="org.apache.ignite.configuration.IgfsConfiguration">
                     <property name="name" value="ignitefs"/>
                     <property name="metaCacheName" value="ignitefs-meta"/>
                     <property name="dataCacheName" value="ignitefs-data"/>

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/88bf1443/examples/src/main/java/org/apache/ignite/examples/ignitefs/IgfsExample.java
----------------------------------------------------------------------
diff --git a/examples/src/main/java/org/apache/ignite/examples/ignitefs/IgfsExample.java b/examples/src/main/java/org/apache/ignite/examples/ignitefs/IgfsExample.java
new file mode 100644
index 0000000..ae8ca5e
--- /dev/null
+++ b/examples/src/main/java/org/apache/ignite/examples/ignitefs/IgfsExample.java
@@ -0,0 +1,278 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.examples.ignitefs;
+
+import org.apache.ignite.*;
+import org.apache.ignite.ignitefs.*;
+import org.jetbrains.annotations.*;
+
+import java.io.*;
+import java.util.*;
+
+/**
+ * Example that shows usage of {@link org.apache.ignite.IgniteFs} API. It starts a node with {@code IgniteFs}
+ * configured and performs several file system operations (create, write, append, read and delete
+ * files, create, list and delete directories).
+ * <p>
+ * Remote nodes should always be started with configuration file which includes
+ * IgniteFs: {@code 'ignite.sh examples/config/filesystem/example-ignitefs.xml'}.
+ * <p>
+ * Alternatively you can run {@link IgfsNodeStartup} in another JVM which will start
+ * node with {@code examples/config/filesystem/example-ignitefs.xml} configuration.
+ */
+public final class IgfsExample {
+    /**
+     * Executes example.
+     *
+     * @param args Command line arguments, none required.
+     * @throws Exception If example execution failed.
+     */
+    public static void main(String[] args) throws Exception {
+        Ignite ignite = Ignition.start("examples/config/filesystem/example-ignitefs.xml");
+
+        System.out.println();
+        System.out.println(">>> IgniteFs example started.");
+
+        try {
+            // Get an instance of Ignite File System.
+            IgniteFs fs = ignite.fileSystem("ignitefs");
+
+            // Working directory path.
+            IgfsPath workDir = new IgfsPath("/examples/fs");
+
+            // Cleanup working directory.
+            delete(fs, workDir);
+
+            // Create empty working directory.
+            mkdirs(fs, workDir);
+
+            // Print information for working directory.
+            printInfo(fs, workDir);
+
+            // File path.
+            IgfsPath filePath = new IgfsPath(workDir, "file.txt");
+
+            // Create file.
+            create(fs, filePath, new byte[] {1, 2, 3});
+
+            // Print information for file.
+            printInfo(fs, filePath);
+
+            // Append more data to previously created file.
+            append(fs, filePath, new byte[] {4, 5});
+
+            // Print information for file.
+            printInfo(fs, filePath);
+
+            // Read data from file.
+            read(fs, filePath);
+
+            // Delete file.
+            delete(fs, filePath);
+
+            // Print information for file.
+            printInfo(fs, filePath);
+
+            // Create several files.
+            for (int i = 0; i < 5; i++)
+                create(fs, new IgfsPath(workDir, "file-" + i + ".txt"), null);
+
+            list(fs, workDir);
+        }
+        finally {
+            Ignition.stop(false);
+        }
+    }
+
+    /**
+     * Deletes file or directory. If directory
+     * is not empty, it's deleted recursively.
+     *
+     * @param fs IgniteFs.
+     * @param path File or directory path.
+     * @throws IgniteException In case of error.
+     */
+    private static void delete(IgniteFs fs, IgfsPath path) throws IgniteException {
+        assert fs != null;
+        assert path != null;
+
+        if (fs.exists(path)) {
+            boolean isFile = fs.info(path).isFile();
+
+            try {
+                fs.delete(path, true);
+
+                System.out.println();
+                System.out.println(">>> Deleted " + (isFile ? "file" : "directory") + ": " + path);
+            }
+            catch (IgfsException e) {
+                System.out.println();
+                System.out.println(">>> Failed to delete " + (isFile ? "file" : "directory") + " [path=" + path +
+                    ", msg=" + e.getMessage() + ']');
+            }
+        }
+        else {
+            System.out.println();
+            System.out.println(">>> Won't delete file or directory (doesn't exist): " + path);
+        }
+    }
+
+    /**
+     * Creates directories.
+     *
+     * @param fs IgniteFs.
+     * @param path Directory path.
+     * @throws IgniteException In case of error.
+     */
+    private static void mkdirs(IgniteFs fs, IgfsPath path) throws IgniteException {
+        assert fs != null;
+        assert path != null;
+
+        try {
+            fs.mkdirs(path);
+
+            System.out.println();
+            System.out.println(">>> Created directory: " + path);
+        }
+        catch (IgfsException e) {
+            System.out.println();
+            System.out.println(">>> Failed to create a directory [path=" + path + ", msg=" + e.getMessage() + ']');
+        }
+
+        System.out.println();
+    }
+
+    /**
+     * Creates file and writes provided data to it.
+     *
+     * @param fs IgniteFs.
+     * @param path File path.
+     * @param data Data.
+     * @throws IgniteException If file can't be created.
+     * @throws IOException If data can't be written.
+     */
+    private static void create(IgniteFs fs, IgfsPath path, @Nullable byte[] data)
+        throws IgniteException, IOException {
+        assert fs != null;
+        assert path != null;
+
+        try (OutputStream out = fs.create(path, true)) {
+            System.out.println();
+            System.out.println(">>> Created file: " + path);
+
+            if (data != null) {
+                out.write(data);
+
+                System.out.println();
+                System.out.println(">>> Wrote data to file: " + path);
+            }
+        }
+
+        System.out.println();
+    }
+
+    /**
+     * Opens file and appends provided data to it.
+     *
+     * @param fs IgniteFs.
+     * @param path File path.
+     * @param data Data.
+     * @throws IgniteException If file can't be created.
+     * @throws IOException If data can't be written.
+     */
+    private static void append(IgniteFs fs, IgfsPath path, byte[] data) throws IgniteException, IOException {
+        assert fs != null;
+        assert path != null;
+        assert data != null;
+        assert fs.info(path).isFile();
+
+        try (OutputStream out = fs.append(path, true)) {
+            System.out.println();
+            System.out.println(">>> Opened file: " + path);
+
+            out.write(data);
+        }
+
+        System.out.println();
+        System.out.println(">>> Appended data to file: " + path);
+    }
+
+    /**
+     * Opens file and reads it to byte array.
+     *
+     * @param fs IgniteFs.
+     * @param path File path.
+     * @throws IgniteException If file can't be opened.
+     * @throws IOException If data can't be read.
+     */
+    private static void read(IgniteFs fs, IgfsPath path) throws IgniteException, IOException {
+        assert fs != null;
+        assert path != null;
+        assert fs.info(path).isFile();
+
+        byte[] data = new byte[(int)fs.info(path).length()];
+
+        try (IgfsInputStream in = fs.open(path)) {
+            in.read(data);
+        }
+
+        System.out.println();
+        System.out.println(">>> Read data from " + path + ": " + Arrays.toString(data));
+    }
+
+    /**
+     * Lists files in directory.
+     *
+     * @param fs IgniteFs.
+     * @param path Directory path.
+     * @throws IgniteException In case of error.
+     */
+    private static void list(IgniteFs fs, IgfsPath path) throws IgniteException {
+        assert fs != null;
+        assert path != null;
+        assert fs.info(path).isDirectory();
+
+        Collection<IgfsPath> files = fs.listPaths(path);
+
+        if (files.isEmpty()) {
+            System.out.println();
+            System.out.println(">>> No files in directory: " + path);
+        }
+        else {
+            System.out.println();
+            System.out.println(">>> List of files in directory: " + path);
+
+            for (IgfsPath f : files)
+                System.out.println(">>>     " + f.name());
+        }
+
+        System.out.println();
+    }
+
+    /**
+     * Prints information for file or directory.
+     *
+     * @param fs IgniteFs.
+     * @param path File or directory path.
+     * @throws IgniteException In case of error.
+     */
+    private static void printInfo(IgniteFs fs, IgfsPath path) throws IgniteException {
+        System.out.println();
+        System.out.println("Information for " + path + ": " + fs.info(path));
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/88bf1443/examples/src/main/java/org/apache/ignite/examples/ignitefs/IgfsMapReduceExample.java
----------------------------------------------------------------------
diff --git a/examples/src/main/java/org/apache/ignite/examples/ignitefs/IgfsMapReduceExample.java b/examples/src/main/java/org/apache/ignite/examples/ignitefs/IgfsMapReduceExample.java
new file mode 100644
index 0000000..9a4be5f
--- /dev/null
+++ b/examples/src/main/java/org/apache/ignite/examples/ignitefs/IgfsMapReduceExample.java
@@ -0,0 +1,249 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.examples.ignitefs;
+
+import org.apache.ignite.*;
+import org.apache.ignite.compute.*;
+import org.apache.ignite.ignitefs.*;
+import org.apache.ignite.ignitefs.mapreduce.*;
+import org.apache.ignite.ignitefs.mapreduce.records.*;
+
+import java.io.*;
+import java.util.*;
+
+/**
+ * Example that shows how to use {@link org.apache.ignite.ignitefs.mapreduce.IgfsTask} to find lines matching particular pattern in the file in pretty
+ * the same way as {@code grep} command does.
+ * <p>
+ * Remote nodes should always be started with configuration file which includes
+ * IgniteFs: {@code 'ignite.sh examples/config/filesystem/example-ignitefs.xml'}.
+ * <p>
+ * Alternatively you can run {@link IgfsNodeStartup} in another JVM which will start
+ * node with {@code examples/config/filesystem/example-ignitefs.xml} configuration.
+ */
+public class IgfsMapReduceExample {
+    /**
+     * Executes example.
+     *
+     * @param args Command line arguments. First argument is file name, second argument is regex to look for.
+     * @throws Exception If failed.
+     */
+    public static void main(String[] args) throws Exception {
+        if (args.length == 0)
+            System.out.println("Please provide file name and regular expression.");
+        else if (args.length == 1)
+            System.out.println("Please provide regular expression.");
+        else {
+            try (Ignite ignite = Ignition.start("examples/config/filesystem/example-ignitefs.xml")) {
+                System.out.println();
+                System.out.println(">>> IgniteFs map reduce example started.");
+
+                // Prepare arguments.
+                String fileName = args[0];
+
+                File file = new File(fileName);
+
+                String regexStr = args[1];
+
+                // Get an instance of Ignite File System.
+                IgniteFs fs = ignite.fileSystem("ignitefs");
+
+                // Working directory path.
+                IgfsPath workDir = new IgfsPath("/examples/fs");
+
+                // Write file to GGFS.
+                IgfsPath fsPath = new IgfsPath(workDir, file.getName());
+
+                writeFile(fs, fsPath, file);
+
+                Collection<Line> lines = fs.execute(new GrepTask(), IgfsNewLineRecordResolver.NEW_LINE,
+                    Collections.singleton(fsPath), regexStr);
+
+                if (lines.isEmpty()) {
+                    System.out.println();
+                    System.out.println("No lines were found.");
+                }
+                else {
+                    for (Line line : lines)
+                        print(line.fileLine());
+                }
+            }
+        }
+    }
+
+    /**
+     * Write file to the Ignite file system.
+     *
+     * @param fs Ignite file system.
+     * @param fsPath Ignite file system path.
+     * @param file File to write.
+     * @throws Exception In case of exception.
+     */
+    private static void writeFile(IgniteFs fs, IgfsPath fsPath, File file) throws Exception {
+        System.out.println();
+        System.out.println("Copying file to IgniteFs: " + file);
+
+        try (
+            IgfsOutputStream os = fs.create(fsPath, true);
+            FileInputStream fis = new FileInputStream(file)
+        ) {
+            byte[] buf = new byte[2048];
+
+            int read = fis.read(buf);
+
+            while (read != -1) {
+                os.write(buf, 0, read);
+
+                read = fis.read(buf);
+            }
+        }
+    }
+
+    /**
+     * Print particular string.
+     *
+     * @param str String.
+     */
+    private static void print(String str) {
+        System.out.println(">>> " + str);
+    }
+
+    /**
+     * Grep task.
+     */
+    private static class GrepTask extends IgfsTask<String, Collection<Line>> {
+        /** {@inheritDoc} */
+        @Override public IgfsJob createJob(IgfsPath path, IgfsFileRange range,
+            IgfsTaskArgs<String> args) {
+            return new GrepJob(args.userArgument());
+        }
+
+        /** {@inheritDoc} */
+        @Override public Collection<Line> reduce(List<ComputeJobResult> results) {
+            Collection<Line> lines = new TreeSet<>(new Comparator<Line>() {
+                @Override public int compare(Line line1, Line line2) {
+                    return line1.rangePosition() < line2.rangePosition() ? -1 :
+                        line1.rangePosition() > line2.rangePosition() ? 1 : line1.lineIndex() - line2.lineIndex();
+                }
+            });
+
+            for (ComputeJobResult res : results) {
+                if (res.getException() != null)
+                    throw res.getException();
+
+                Collection<Line> line = res.getData();
+
+                if (line != null)
+                    lines.addAll(line);
+            }
+
+            return lines;
+        }
+    }
+
+    /**
+     * Grep job.
+     */
+    private static class GrepJob extends IgfsInputStreamJobAdapter {
+        /** Regex string. */
+        private final String regex;
+
+        /**
+         * Constructor.
+         *
+         * @param regex Regex string.
+         */
+        private GrepJob(String regex) {
+            this.regex = regex;
+        }
+
+        /**  {@inheritDoc} */
+        @Override public Object execute(IgniteFs igniteFs, IgfsRangeInputStream in) throws IgniteException, IOException {
+            Collection<Line> res = null;
+
+            long start = in.startOffset();
+
+            try (BufferedReader br = new BufferedReader(new InputStreamReader(in))) {
+                int ctr = 0;
+
+                String line = br.readLine();
+
+                while (line != null) {
+                    if (line.matches(".*" + regex + ".*")) {
+                        if (res == null)
+                            res = new HashSet<>();
+
+                        res.add(new Line(start, ctr++, line));
+                    }
+
+                    line = br.readLine();
+                }
+            }
+
+            return res;
+        }
+    }
+
+    /**
+     * Single file line with it's position.
+     */
+    private static class Line {
+        /** Line start position in the file. */
+        private long rangePos;
+
+        /** Matching line index within the range. */
+        private final int lineIdx;
+
+        /** File line. */
+        private String line;
+
+        /**
+         * Constructor.
+         *
+         * @param rangePos Range position.
+         * @param lineIdx Matching line index within the range.
+         * @param line File line.
+         */
+        private Line(long rangePos, int lineIdx, String line) {
+            this.rangePos = rangePos;
+            this.lineIdx = lineIdx;
+            this.line = line;
+        }
+
+        /**
+         * @return Range position.
+         */
+        public long rangePosition() {
+            return rangePos;
+        }
+
+        /**
+         * @return Matching line index within the range.
+         */
+        public int lineIndex() {
+            return lineIdx;
+        }
+
+        /**
+         * @return File line.
+         */
+        public String fileLine() {
+            return line;
+        }
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/88bf1443/examples/src/main/java/org/apache/ignite/examples/ignitefs/IgfsNodeStartup.java
----------------------------------------------------------------------
diff --git a/examples/src/main/java/org/apache/ignite/examples/ignitefs/IgfsNodeStartup.java b/examples/src/main/java/org/apache/ignite/examples/ignitefs/IgfsNodeStartup.java
new file mode 100644
index 0000000..cbf2b77
--- /dev/null
+++ b/examples/src/main/java/org/apache/ignite/examples/ignitefs/IgfsNodeStartup.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.examples.ignitefs;
+
+import org.apache.ignite.*;
+
+/**
+ * Starts up an empty node with IgniteFs configuration.
+ * You can also start a stand-alone Ignite instance by passing the path
+ * to configuration file to {@code 'ignite.{sh|bat}'} script, like so:
+ * {@code 'ignite.sh examples/config/filesystem/example-ignitefs.xml'}.
+ * <p>
+ * The difference is that running this class from IDE adds all example classes to classpath
+ * but running from command line doesn't.
+ */
+public class IgfsNodeStartup {
+    /**
+     * Start up an empty node with specified cache configuration.
+     *
+     * @param args Command line arguments, none required.
+     * @throws IgniteException If example execution failed.
+     */
+    public static void main(String[] args) throws IgniteException {
+        Ignition.start("examples/config/filesystem/example-ignitefs.xml");
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/88bf1443/examples/src/main/java/org/apache/ignite/examples/ignitefs/IgniteFsExample.java
----------------------------------------------------------------------
diff --git a/examples/src/main/java/org/apache/ignite/examples/ignitefs/IgniteFsExample.java b/examples/src/main/java/org/apache/ignite/examples/ignitefs/IgniteFsExample.java
deleted file mode 100644
index cd247a4..0000000
--- a/examples/src/main/java/org/apache/ignite/examples/ignitefs/IgniteFsExample.java
+++ /dev/null
@@ -1,278 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.examples.ignitefs;
-
-import org.apache.ignite.*;
-import org.apache.ignite.ignitefs.*;
-import org.jetbrains.annotations.*;
-
-import java.io.*;
-import java.util.*;
-
-/**
- * Example that shows usage of {@link org.apache.ignite.IgniteFs} API. It starts a node with {@code IgniteFs}
- * configured and performs several file system operations (create, write, append, read and delete
- * files, create, list and delete directories).
- * <p>
- * Remote nodes should always be started with configuration file which includes
- * IgniteFs: {@code 'ignite.sh examples/config/filesystem/example-ignitefs.xml'}.
- * <p>
- * Alternatively you can run {@link IgniteFsNodeStartup} in another JVM which will start
- * node with {@code examples/config/filesystem/example-ignitefs.xml} configuration.
- */
-public final class IgniteFsExample {
-    /**
-     * Executes example.
-     *
-     * @param args Command line arguments, none required.
-     * @throws Exception If example execution failed.
-     */
-    public static void main(String[] args) throws Exception {
-        Ignite ignite = Ignition.start("examples/config/filesystem/example-ignitefs.xml");
-
-        System.out.println();
-        System.out.println(">>> IgniteFs example started.");
-
-        try {
-            // Get an instance of Ignite File System.
-            IgniteFs fs = ignite.fileSystem("ignitefs");
-
-            // Working directory path.
-            IgfsPath workDir = new IgfsPath("/examples/fs");
-
-            // Cleanup working directory.
-            delete(fs, workDir);
-
-            // Create empty working directory.
-            mkdirs(fs, workDir);
-
-            // Print information for working directory.
-            printInfo(fs, workDir);
-
-            // File path.
-            IgfsPath filePath = new IgfsPath(workDir, "file.txt");
-
-            // Create file.
-            create(fs, filePath, new byte[] {1, 2, 3});
-
-            // Print information for file.
-            printInfo(fs, filePath);
-
-            // Append more data to previously created file.
-            append(fs, filePath, new byte[] {4, 5});
-
-            // Print information for file.
-            printInfo(fs, filePath);
-
-            // Read data from file.
-            read(fs, filePath);
-
-            // Delete file.
-            delete(fs, filePath);
-
-            // Print information for file.
-            printInfo(fs, filePath);
-
-            // Create several files.
-            for (int i = 0; i < 5; i++)
-                create(fs, new IgfsPath(workDir, "file-" + i + ".txt"), null);
-
-            list(fs, workDir);
-        }
-        finally {
-            Ignition.stop(false);
-        }
-    }
-
-    /**
-     * Deletes file or directory. If directory
-     * is not empty, it's deleted recursively.
-     *
-     * @param fs IgniteFs.
-     * @param path File or directory path.
-     * @throws IgniteException In case of error.
-     */
-    private static void delete(IgniteFs fs, IgfsPath path) throws IgniteException {
-        assert fs != null;
-        assert path != null;
-
-        if (fs.exists(path)) {
-            boolean isFile = fs.info(path).isFile();
-
-            try {
-                fs.delete(path, true);
-
-                System.out.println();
-                System.out.println(">>> Deleted " + (isFile ? "file" : "directory") + ": " + path);
-            }
-            catch (IgfsException e) {
-                System.out.println();
-                System.out.println(">>> Failed to delete " + (isFile ? "file" : "directory") + " [path=" + path +
-                    ", msg=" + e.getMessage() + ']');
-            }
-        }
-        else {
-            System.out.println();
-            System.out.println(">>> Won't delete file or directory (doesn't exist): " + path);
-        }
-    }
-
-    /**
-     * Creates directories.
-     *
-     * @param fs IgniteFs.
-     * @param path Directory path.
-     * @throws IgniteException In case of error.
-     */
-    private static void mkdirs(IgniteFs fs, IgfsPath path) throws IgniteException {
-        assert fs != null;
-        assert path != null;
-
-        try {
-            fs.mkdirs(path);
-
-            System.out.println();
-            System.out.println(">>> Created directory: " + path);
-        }
-        catch (IgfsException e) {
-            System.out.println();
-            System.out.println(">>> Failed to create a directory [path=" + path + ", msg=" + e.getMessage() + ']');
-        }
-
-        System.out.println();
-    }
-
-    /**
-     * Creates file and writes provided data to it.
-     *
-     * @param fs IgniteFs.
-     * @param path File path.
-     * @param data Data.
-     * @throws IgniteException If file can't be created.
-     * @throws IOException If data can't be written.
-     */
-    private static void create(IgniteFs fs, IgfsPath path, @Nullable byte[] data)
-        throws IgniteException, IOException {
-        assert fs != null;
-        assert path != null;
-
-        try (OutputStream out = fs.create(path, true)) {
-            System.out.println();
-            System.out.println(">>> Created file: " + path);
-
-            if (data != null) {
-                out.write(data);
-
-                System.out.println();
-                System.out.println(">>> Wrote data to file: " + path);
-            }
-        }
-
-        System.out.println();
-    }
-
-    /**
-     * Opens file and appends provided data to it.
-     *
-     * @param fs IgniteFs.
-     * @param path File path.
-     * @param data Data.
-     * @throws IgniteException If file can't be created.
-     * @throws IOException If data can't be written.
-     */
-    private static void append(IgniteFs fs, IgfsPath path, byte[] data) throws IgniteException, IOException {
-        assert fs != null;
-        assert path != null;
-        assert data != null;
-        assert fs.info(path).isFile();
-
-        try (OutputStream out = fs.append(path, true)) {
-            System.out.println();
-            System.out.println(">>> Opened file: " + path);
-
-            out.write(data);
-        }
-
-        System.out.println();
-        System.out.println(">>> Appended data to file: " + path);
-    }
-
-    /**
-     * Opens file and reads it to byte array.
-     *
-     * @param fs IgniteFs.
-     * @param path File path.
-     * @throws IgniteException If file can't be opened.
-     * @throws IOException If data can't be read.
-     */
-    private static void read(IgniteFs fs, IgfsPath path) throws IgniteException, IOException {
-        assert fs != null;
-        assert path != null;
-        assert fs.info(path).isFile();
-
-        byte[] data = new byte[(int)fs.info(path).length()];
-
-        try (IgfsInputStream in = fs.open(path)) {
-            in.read(data);
-        }
-
-        System.out.println();
-        System.out.println(">>> Read data from " + path + ": " + Arrays.toString(data));
-    }
-
-    /**
-     * Lists files in directory.
-     *
-     * @param fs IgniteFs.
-     * @param path Directory path.
-     * @throws IgniteException In case of error.
-     */
-    private static void list(IgniteFs fs, IgfsPath path) throws IgniteException {
-        assert fs != null;
-        assert path != null;
-        assert fs.info(path).isDirectory();
-
-        Collection<IgfsPath> files = fs.listPaths(path);
-
-        if (files.isEmpty()) {
-            System.out.println();
-            System.out.println(">>> No files in directory: " + path);
-        }
-        else {
-            System.out.println();
-            System.out.println(">>> List of files in directory: " + path);
-
-            for (IgfsPath f : files)
-                System.out.println(">>>     " + f.name());
-        }
-
-        System.out.println();
-    }
-
-    /**
-     * Prints information for file or directory.
-     *
-     * @param fs IgniteFs.
-     * @param path File or directory path.
-     * @throws IgniteException In case of error.
-     */
-    private static void printInfo(IgniteFs fs, IgfsPath path) throws IgniteException {
-        System.out.println();
-        System.out.println("Information for " + path + ": " + fs.info(path));
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/88bf1443/examples/src/main/java/org/apache/ignite/examples/ignitefs/IgniteFsMapReduceExample.java
----------------------------------------------------------------------
diff --git a/examples/src/main/java/org/apache/ignite/examples/ignitefs/IgniteFsMapReduceExample.java b/examples/src/main/java/org/apache/ignite/examples/ignitefs/IgniteFsMapReduceExample.java
deleted file mode 100644
index b5f913c..0000000
--- a/examples/src/main/java/org/apache/ignite/examples/ignitefs/IgniteFsMapReduceExample.java
+++ /dev/null
@@ -1,249 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.examples.ignitefs;
-
-import org.apache.ignite.*;
-import org.apache.ignite.compute.*;
-import org.apache.ignite.ignitefs.*;
-import org.apache.ignite.ignitefs.mapreduce.*;
-import org.apache.ignite.ignitefs.mapreduce.records.*;
-
-import java.io.*;
-import java.util.*;
-
-/**
- * Example that shows how to use {@link org.apache.ignite.ignitefs.mapreduce.IgfsTask} to find lines matching particular pattern in the file in pretty
- * the same way as {@code grep} command does.
- * <p>
- * Remote nodes should always be started with configuration file which includes
- * IgniteFs: {@code 'ignite.sh examples/config/filesystem/example-ignitefs.xml'}.
- * <p>
- * Alternatively you can run {@link IgniteFsNodeStartup} in another JVM which will start
- * node with {@code examples/config/filesystem/example-ignitefs.xml} configuration.
- */
-public class IgniteFsMapReduceExample {
-    /**
-     * Executes example.
-     *
-     * @param args Command line arguments. First argument is file name, second argument is regex to look for.
-     * @throws Exception If failed.
-     */
-    public static void main(String[] args) throws Exception {
-        if (args.length == 0)
-            System.out.println("Please provide file name and regular expression.");
-        else if (args.length == 1)
-            System.out.println("Please provide regular expression.");
-        else {
-            try (Ignite ignite = Ignition.start("examples/config/filesystem/example-ignitefs.xml")) {
-                System.out.println();
-                System.out.println(">>> IgniteFs map reduce example started.");
-
-                // Prepare arguments.
-                String fileName = args[0];
-
-                File file = new File(fileName);
-
-                String regexStr = args[1];
-
-                // Get an instance of Ignite File System.
-                IgniteFs fs = ignite.fileSystem("ignitefs");
-
-                // Working directory path.
-                IgfsPath workDir = new IgfsPath("/examples/fs");
-
-                // Write file to GGFS.
-                IgfsPath fsPath = new IgfsPath(workDir, file.getName());
-
-                writeFile(fs, fsPath, file);
-
-                Collection<Line> lines = fs.execute(new GrepTask(), IgfsNewLineRecordResolver.NEW_LINE,
-                    Collections.singleton(fsPath), regexStr);
-
-                if (lines.isEmpty()) {
-                    System.out.println();
-                    System.out.println("No lines were found.");
-                }
-                else {
-                    for (Line line : lines)
-                        print(line.fileLine());
-                }
-            }
-        }
-    }
-
-    /**
-     * Write file to the Ignite file system.
-     *
-     * @param fs Ignite file system.
-     * @param fsPath Ignite file system path.
-     * @param file File to write.
-     * @throws Exception In case of exception.
-     */
-    private static void writeFile(IgniteFs fs, IgfsPath fsPath, File file) throws Exception {
-        System.out.println();
-        System.out.println("Copying file to IgniteFs: " + file);
-
-        try (
-            IgfsOutputStream os = fs.create(fsPath, true);
-            FileInputStream fis = new FileInputStream(file)
-        ) {
-            byte[] buf = new byte[2048];
-
-            int read = fis.read(buf);
-
-            while (read != -1) {
-                os.write(buf, 0, read);
-
-                read = fis.read(buf);
-            }
-        }
-    }
-
-    /**
-     * Print particular string.
-     *
-     * @param str String.
-     */
-    private static void print(String str) {
-        System.out.println(">>> " + str);
-    }
-
-    /**
-     * Grep task.
-     */
-    private static class GrepTask extends IgfsTask<String, Collection<Line>> {
-        /** {@inheritDoc} */
-        @Override public IgfsJob createJob(IgfsPath path, IgfsFileRange range,
-            IgfsTaskArgs<String> args) {
-            return new GrepJob(args.userArgument());
-        }
-
-        /** {@inheritDoc} */
-        @Override public Collection<Line> reduce(List<ComputeJobResult> results) {
-            Collection<Line> lines = new TreeSet<>(new Comparator<Line>() {
-                @Override public int compare(Line line1, Line line2) {
-                    return line1.rangePosition() < line2.rangePosition() ? -1 :
-                        line1.rangePosition() > line2.rangePosition() ? 1 : line1.lineIndex() - line2.lineIndex();
-                }
-            });
-
-            for (ComputeJobResult res : results) {
-                if (res.getException() != null)
-                    throw res.getException();
-
-                Collection<Line> line = res.getData();
-
-                if (line != null)
-                    lines.addAll(line);
-            }
-
-            return lines;
-        }
-    }
-
-    /**
-     * Grep job.
-     */
-    private static class GrepJob extends IgfsInputStreamJobAdapter {
-        /** Regex string. */
-        private final String regex;
-
-        /**
-         * Constructor.
-         *
-         * @param regex Regex string.
-         */
-        private GrepJob(String regex) {
-            this.regex = regex;
-        }
-
-        /**  {@inheritDoc} */
-        @Override public Object execute(IgniteFs igniteFs, IgfsRangeInputStream in) throws IgniteException, IOException {
-            Collection<Line> res = null;
-
-            long start = in.startOffset();
-
-            try (BufferedReader br = new BufferedReader(new InputStreamReader(in))) {
-                int ctr = 0;
-
-                String line = br.readLine();
-
-                while (line != null) {
-                    if (line.matches(".*" + regex + ".*")) {
-                        if (res == null)
-                            res = new HashSet<>();
-
-                        res.add(new Line(start, ctr++, line));
-                    }
-
-                    line = br.readLine();
-                }
-            }
-
-            return res;
-        }
-    }
-
-    /**
-     * Single file line with it's position.
-     */
-    private static class Line {
-        /** Line start position in the file. */
-        private long rangePos;
-
-        /** Matching line index within the range. */
-        private final int lineIdx;
-
-        /** File line. */
-        private String line;
-
-        /**
-         * Constructor.
-         *
-         * @param rangePos Range position.
-         * @param lineIdx Matching line index within the range.
-         * @param line File line.
-         */
-        private Line(long rangePos, int lineIdx, String line) {
-            this.rangePos = rangePos;
-            this.lineIdx = lineIdx;
-            this.line = line;
-        }
-
-        /**
-         * @return Range position.
-         */
-        public long rangePosition() {
-            return rangePos;
-        }
-
-        /**
-         * @return Matching line index within the range.
-         */
-        public int lineIndex() {
-            return lineIdx;
-        }
-
-        /**
-         * @return File line.
-         */
-        public String fileLine() {
-            return line;
-        }
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/88bf1443/examples/src/main/java/org/apache/ignite/examples/ignitefs/IgniteFsNodeStartup.java
----------------------------------------------------------------------
diff --git a/examples/src/main/java/org/apache/ignite/examples/ignitefs/IgniteFsNodeStartup.java b/examples/src/main/java/org/apache/ignite/examples/ignitefs/IgniteFsNodeStartup.java
deleted file mode 100644
index d331077..0000000
--- a/examples/src/main/java/org/apache/ignite/examples/ignitefs/IgniteFsNodeStartup.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.examples.ignitefs;
-
-import org.apache.ignite.*;
-
-/**
- * Starts up an empty node with IgniteFs configuration.
- * You can also start a stand-alone Ignite instance by passing the path
- * to configuration file to {@code 'ignite.{sh|bat}'} script, like so:
- * {@code 'ignite.sh examples/config/filesystem/example-ignitefs.xml'}.
- * <p>
- * The difference is that running this class from IDE adds all example classes to classpath
- * but running from command line doesn't.
- */
-public class IgniteFsNodeStartup {
-    /**
-     * Start up an empty node with specified cache configuration.
-     *
-     * @param args Command line arguments, none required.
-     * @throws IgniteException If example execution failed.
-     */
-    public static void main(String[] args) throws IgniteException {
-        Ignition.start("examples/config/filesystem/example-ignitefs.xml");
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/88bf1443/examples/src/test/java/org/apache/ignite/examples/IgfsExamplesSelfTest.java
----------------------------------------------------------------------
diff --git a/examples/src/test/java/org/apache/ignite/examples/IgfsExamplesSelfTest.java b/examples/src/test/java/org/apache/ignite/examples/IgfsExamplesSelfTest.java
index c21634a..1d421f7 100644
--- a/examples/src/test/java/org/apache/ignite/examples/IgfsExamplesSelfTest.java
+++ b/examples/src/test/java/org/apache/ignite/examples/IgfsExamplesSelfTest.java
@@ -42,7 +42,7 @@ public class IgfsExamplesSelfTest extends GridAbstractExamplesTest {
             startGrid("test2", configPath);
             startGrid("test3", configPath);
 
-            IgniteFsExample.main(EMPTY_ARGS);
+            IgfsExample.main(EMPTY_ARGS);
         }
         finally {
             stopAllGrids();

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/88bf1443/modules/core/src/main/java/org/apache/ignite/Ignite.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/Ignite.java b/modules/core/src/main/java/org/apache/ignite/Ignite.java
index 1d665f8..f3b86fb 100644
--- a/modules/core/src/main/java/org/apache/ignite/Ignite.java
+++ b/modules/core/src/main/java/org/apache/ignite/Ignite.java
@@ -196,9 +196,9 @@ public interface Ignite extends AutoCloseable {
      * @param <V> Value type.
      * @param name Cache name.
      * @return Cache instance for given name.
-     * @see IgniteFsConfiguration
-     * @see IgniteFsConfiguration#getDataCacheName()
-     * @see IgniteFsConfiguration#getMetaCacheName()
+     * @see org.apache.ignite.configuration.IgfsConfiguration
+     * @see org.apache.ignite.configuration.IgfsConfiguration#getDataCacheName()
+     * @see org.apache.ignite.configuration.IgfsConfiguration#getMetaCacheName()
      */
     public <K, V> GridCache<K, V> cache(@Nullable String name);
 
@@ -206,9 +206,9 @@ public interface Ignite extends AutoCloseable {
      * Gets all configured caches.
      * Caches that are used as GGFS meta and data caches will not be returned in resulting collection.
      *
-     * @see IgniteFsConfiguration
-     * @see IgniteFsConfiguration#getDataCacheName()
-     * @see IgniteFsConfiguration#getMetaCacheName()
+     * @see org.apache.ignite.configuration.IgfsConfiguration
+     * @see org.apache.ignite.configuration.IgfsConfiguration#getDataCacheName()
+     * @see org.apache.ignite.configuration.IgfsConfiguration#getMetaCacheName()
      * @return All configured caches.
      */
     public Collection<GridCache<?, ?>> caches();

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/88bf1443/modules/core/src/main/java/org/apache/ignite/IgniteFs.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/IgniteFs.java b/modules/core/src/main/java/org/apache/ignite/IgniteFs.java
index 47632cf..9603917 100644
--- a/modules/core/src/main/java/org/apache/ignite/IgniteFs.java
+++ b/modules/core/src/main/java/org/apache/ignite/IgniteFs.java
@@ -75,7 +75,7 @@ public interface IgniteFs extends Igfs, IgniteAsyncSupport {
      *
      * @return GGFS configuration.
      */
-    public IgniteFsConfiguration configuration();
+    public IgfsConfiguration configuration();
 
     /**
      * Gets summary (total number of files, total number of directories and total length)
@@ -284,7 +284,7 @@ public interface IgniteFs extends Igfs, IgniteAsyncSupport {
 
     /**
      * Executes GGFS task with overridden maximum range length (see
-     * {@link org.apache.ignite.configuration.IgniteFsConfiguration#getMaximumTaskRangeLength()} for more information).
+     * {@link org.apache.ignite.configuration.IgfsConfiguration#getMaximumTaskRangeLength()} for more information).
      * <p>
      * Supports asynchronous execution (see {@link IgniteAsyncSupport}).
      *
@@ -322,7 +322,7 @@ public interface IgniteFs extends Igfs, IgniteAsyncSupport {
 
     /**
      * Executes GGFS task with overridden maximum range length (see
-     * {@link org.apache.ignite.configuration.IgniteFsConfiguration#getMaximumTaskRangeLength()} for more information).
+     * {@link org.apache.ignite.configuration.IgfsConfiguration#getMaximumTaskRangeLength()} for more information).
      * <p>
      * Supports asynchronous execution (see {@link IgniteAsyncSupport}).
      *

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/88bf1443/modules/core/src/main/java/org/apache/ignite/configuration/IgfsConfiguration.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/configuration/IgfsConfiguration.java b/modules/core/src/main/java/org/apache/ignite/configuration/IgfsConfiguration.java
new file mode 100644
index 0000000..ab6021f
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/configuration/IgfsConfiguration.java
@@ -0,0 +1,807 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.configuration;
+
+import org.apache.ignite.ignitefs.*;
+import org.apache.ignite.internal.util.typedef.internal.*;
+import org.jetbrains.annotations.*;
+
+import java.util.*;
+import java.util.concurrent.*;
+
+/**
+ * {@code GGFS} configuration. More than one file system can be configured within grid.
+ * {@code GGFS} configuration is provided via {@link org.apache.ignite.configuration.IgniteConfiguration#getGgfsConfiguration()}
+ * method.
+ * <p>
+ * Refer to {@code config/hadoop/default-config.xml} or {@code config/hadoop/default-config-client.xml}
+ * configuration files under Ignite installation to see sample {@code GGFS} configuration.
+ */
+public class IgfsConfiguration {
+    /** Default file system user name. */
+    public static final String DFLT_USER_NAME = System.getProperty("user.name", "anonymous");
+
+    /** Default IPC port. */
+    public static final int DFLT_IPC_PORT = 10500;
+
+    /** Default fragmentizer throttling block length. */
+    public static final long DFLT_FRAGMENTIZER_THROTTLING_BLOCK_LENGTH = 16 * 1024 * 1024;
+
+    /** Default fragmentizer throttling delay. */
+    public static final long DFLT_FRAGMENTIZER_THROTTLING_DELAY = 200;
+
+    /** Default fragmentizer concurrent files. */
+    public static final int DFLT_FRAGMENTIZER_CONCURRENT_FILES = 0;
+
+    /** Default fragmentizer local writes ratio. */
+    public static final float DFLT_FRAGMENTIZER_LOCAL_WRITES_RATIO = 0.8f;
+
+    /** Fragmentizer enabled property. */
+    public static final boolean DFLT_FRAGMENTIZER_ENABLED = true;
+
+    /** Default batch size for logging. */
+    public static final int DFLT_GGFS_LOG_BATCH_SIZE = 100;
+
+    /** Default {@code GGFS} log directory. */
+    public static final String DFLT_GGFS_LOG_DIR = "work/ggfs/log";
+
+    /** Default per node buffer size. */
+    public static final int DFLT_PER_NODE_BATCH_SIZE = 100;
+
+    /** Default number of per node parallel operations. */
+    public static final int DFLT_PER_NODE_PARALLEL_BATCH_CNT = 8;
+
+    /** Default GGFS mode. */
+    public static final IgfsMode DFLT_MODE = IgfsMode.DUAL_ASYNC;
+
+    /** Default file's data block size (bytes). */
+    public static final int DFLT_BLOCK_SIZE = 1 << 16;
+
+    /** Default read/write buffers size (bytes). */
+    public static final int DFLT_BUF_SIZE = 1 << 16;
+
+    /** Default trash directory purge await timeout in case data cache oversize is detected. */
+    public static final long DFLT_TRASH_PURGE_TIMEOUT = 1000;
+
+    /** Default management port. */
+    public static final int DFLT_MGMT_PORT = 11400;
+
+    /** Default IPC endpoint enabled flag. */
+    public static final boolean DFLT_IPC_ENDPOINT_ENABLED = true;
+
+    /** GGFS instance name. */
+    private String name;
+
+    /** Cache name to store GGFS meta information. */
+    private String metaCacheName;
+
+    /** Cache name to store file's data blocks. */
+    private String dataCacheName;
+
+    /** File's data block size (bytes). */
+    private int blockSize = DFLT_BLOCK_SIZE;
+
+    /** The number of pre-fetched blocks if specific file's chunk is requested. */
+    private int prefetchBlocks;
+
+    /** Amount of sequential block reads before prefetch is triggered. */
+    private int seqReadsBeforePrefetch;
+
+    /** Read/write buffers size for stream operations (bytes). */
+    private int bufSize = DFLT_BUF_SIZE;
+
+    /** Per node buffer size. */
+    private int perNodeBatchSize = DFLT_PER_NODE_BATCH_SIZE;
+
+    /** Per node parallel operations. */
+    private int perNodeParallelBatchCnt = DFLT_PER_NODE_PARALLEL_BATCH_CNT;
+
+    /** IPC endpoint properties to publish GGFS over. */
+    private Map<String, String> ipcEndpointCfg;
+
+    /** IPC endpoint enabled flag. */
+    private boolean ipcEndpointEnabled = DFLT_IPC_ENDPOINT_ENABLED;
+
+    /** Management port. */
+    private int mgmtPort = DFLT_MGMT_PORT;
+
+    /** Secondary file system */
+    private Igfs secondaryFs;
+
+    /** GGFS mode. */
+    private IgfsMode dfltMode = DFLT_MODE;
+
+    /** Fragmentizer throttling block length. */
+    private long fragmentizerThrottlingBlockLen = DFLT_FRAGMENTIZER_THROTTLING_BLOCK_LENGTH;
+
+    /** Fragmentizer throttling delay. */
+    private long fragmentizerThrottlingDelay = DFLT_FRAGMENTIZER_THROTTLING_DELAY;
+
+    /** Fragmentizer concurrent files. */
+    private int fragmentizerConcurrentFiles = DFLT_FRAGMENTIZER_CONCURRENT_FILES;
+
+    /** Fragmentizer local writes ratio. */
+    private float fragmentizerLocWritesRatio = DFLT_FRAGMENTIZER_LOCAL_WRITES_RATIO;
+
+    /** Fragmentizer enabled flag. */
+    private boolean fragmentizerEnabled = DFLT_FRAGMENTIZER_ENABLED;
+
+    /** Path modes. */
+    private Map<String, IgfsMode> pathModes;
+
+    /** Maximum space. */
+    private long maxSpace;
+
+    /** Trash purge await timeout. */
+    private long trashPurgeTimeout = DFLT_TRASH_PURGE_TIMEOUT;
+
+    /** Dual mode PUT operations executor service. */
+    private ExecutorService dualModePutExec;
+
+    /** Dual mode PUT operations executor service shutdown flag. */
+    private boolean dualModePutExecShutdown;
+
+    /** Maximum amount of data in pending puts. */
+    private long dualModeMaxPendingPutsSize;
+
+    /** Maximum range length. */
+    private long maxTaskRangeLen;
+
+    /**
+     * Constructs default configuration.
+     */
+    public IgfsConfiguration() {
+        // No-op.
+    }
+
+    /**
+     * Constructs a copy of the given configuration.
+     * <p>
+     * NOTE(review): reference-typed properties ({@code pathModes}, {@code ipcEndpointCfg},
+     * {@code dualModePutExec}, {@code secondaryFs}) are copied by reference (shallow copy);
+     * confirm callers do not mutate them after copying.
+     *
+     * @param cfg Configuration to copy. Must not be {@code null}.
+     */
+    public IgfsConfiguration(IgfsConfiguration cfg) {
+        assert cfg != null;
+
+        /*
+         * Must preserve alphabetical order!
+         */
+        blockSize = cfg.getBlockSize();
+        bufSize = cfg.getStreamBufferSize();
+        dataCacheName = cfg.getDataCacheName();
+        dfltMode = cfg.getDefaultMode();
+        dualModeMaxPendingPutsSize = cfg.getDualModeMaxPendingPutsSize();
+        dualModePutExec = cfg.getDualModePutExecutorService();
+        dualModePutExecShutdown = cfg.getDualModePutExecutorServiceShutdown();
+        fragmentizerConcurrentFiles = cfg.getFragmentizerConcurrentFiles();
+        fragmentizerEnabled = cfg.isFragmentizerEnabled();
+        fragmentizerLocWritesRatio = cfg.getFragmentizerLocalWritesRatio();
+        fragmentizerThrottlingBlockLen = cfg.getFragmentizerThrottlingBlockLength();
+        fragmentizerThrottlingDelay = cfg.getFragmentizerThrottlingDelay();
+        ipcEndpointCfg = cfg.getIpcEndpointConfiguration();
+        ipcEndpointEnabled = cfg.isIpcEndpointEnabled();
+        maxSpace = cfg.getMaxSpaceSize();
+        maxTaskRangeLen = cfg.getMaximumTaskRangeLength();
+        metaCacheName = cfg.getMetaCacheName();
+        mgmtPort = cfg.getManagementPort();
+        name = cfg.getName();
+        pathModes = cfg.getPathModes();
+        perNodeBatchSize = cfg.getPerNodeBatchSize();
+        perNodeParallelBatchCnt = cfg.getPerNodeParallelBatchCount();
+        prefetchBlocks = cfg.getPrefetchBlocks();
+        secondaryFs = cfg.getSecondaryFileSystem();
+        seqReadsBeforePrefetch = cfg.getSequentialReadsBeforePrefetch();
+        trashPurgeTimeout = cfg.getTrashPurgeTimeout();
+    }
+
+    /**
+     * Gets IGFS instance name. If {@code null}, then instance with default
+     * name will be used.
+     *
+     * @return IGFS instance name.
+     */
+    @Nullable public String getName() {
+        return name;
+    }
+
+    /**
+     * Sets IGFS instance name.
+     *
+     * @param name IGFS instance name. May be {@code null}, in which case the default instance name is used.
+     */
+    public void setName(String name) {
+        this.name = name;
+    }
+
+    /**
+     * Cache name to store IGFS meta information. If {@code null}, then instance
+     * with default meta-cache name will be used.
+     *
+     * @return Cache name to store IGFS meta information.
+     */
+    @Nullable public String getMetaCacheName() {
+        return metaCacheName;
+    }
+
+    /**
+     * Sets cache name to store IGFS meta information.
+     *
+     * @param metaCacheName Cache name to store IGFS meta information.
+     */
+    public void setMetaCacheName(String metaCacheName) {
+        this.metaCacheName = metaCacheName;
+    }
+
+    /**
+     * Cache name to store IGFS data.
+     *
+     * @return Cache name to store IGFS data.
+     */
+    @Nullable public String getDataCacheName() {
+        return dataCacheName;
+    }
+
+    /**
+     * Sets cache name to store IGFS data.
+     *
+     * @param dataCacheName Cache name to store IGFS data.
+     */
+    public void setDataCacheName(String dataCacheName) {
+        this.dataCacheName = dataCacheName;
+    }
+
+    /**
+     * Gets the block size (in bytes) used to store file data.
+     *
+     * @return File data block size in bytes.
+     */
+    public int getBlockSize() {
+        return blockSize;
+    }
+
+    /**
+     * Sets the block size used to store file data.
+     *
+     * @param blockSize Block size in bytes, or {@code 0} to restore the default value. Must be non-negative.
+     */
+    public void setBlockSize(int blockSize) {
+        A.ensure(blockSize >= 0, "blockSize >= 0");
+
+        // Zero is a sentinel meaning "reset to default".
+        if (blockSize == 0)
+            this.blockSize = DFLT_BLOCK_SIZE;
+        else
+            this.blockSize = blockSize;
+    }
+
+    /**
+     * Get number of pre-fetched blocks if specific file's chunk is requested.
+     *
+     * @return The number of pre-fetched blocks.
+     */
+    public int getPrefetchBlocks() {
+        return prefetchBlocks;
+    }
+
+    /**
+     * Sets the number of pre-fetched blocks if specific file's chunk is requested.
+     *
+     * @param prefetchBlocks New number of pre-fetched blocks. Must be non-negative.
+     */
+    public void setPrefetchBlocks(int prefetchBlocks) {
+        A.ensure(prefetchBlocks >= 0, "prefetchBlocks >= 0");
+
+        this.prefetchBlocks = prefetchBlocks;
+    }
+
+    /**
+     * Get amount of sequential block reads before prefetch is triggered. The
+     * higher this value, the longer IGFS will wait before starting to prefetch
+     * values ahead of time. Depending on the use case, this can either help
+     * or hurt performance.
+     * <p>
+     * Default is {@code 0} which means that pre-fetching will start right away.
+     * <h1 class="header">Integration With Hadoop</h1>
+     * This parameter can be also overridden for individual Hadoop MapReduce tasks by passing
+     * {@code org.apache.ignite.ignitefs.hadoop.GridGgfsHadoopParameters.PARAM_GGFS_SEQ_READS_BEFORE_PREFETCH}
+     * configuration property directly to Hadoop MapReduce task.
+     * <p>
+     * <b>NOTE:</b> Integration with Hadoop is available only in {@code In-Memory Accelerator For Hadoop} edition.
+     *
+     * @return Amount of sequential block reads.
+     */
+    public int getSequentialReadsBeforePrefetch() {
+        return seqReadsBeforePrefetch;
+    }
+
+    /**
+     * Sets amount of sequential block reads before prefetch is triggered. The
+     * higher this value, the longer IGFS will wait before starting to prefetch
+     * values ahead of time. Depending on the use case, this can either help
+     * or hurt performance.
+     * <p>
+     * Default is {@code 0} which means that pre-fetching will start right away.
+     * <h1 class="header">Integration With Hadoop</h1>
+     * This parameter can be also overridden for individual Hadoop MapReduce tasks by passing
+     * {@code org.apache.ignite.ignitefs.hadoop.GridGgfsHadoopParameters.PARAM_GGFS_SEQ_READS_BEFORE_PREFETCH}
+     * configuration property directly to Hadoop MapReduce task.
+     * <p>
+     * <b>NOTE:</b> Integration with Hadoop is available only in {@code In-Memory Accelerator For Hadoop} edition.
+     *
+     * @param seqReadsBeforePrefetch Amount of sequential block reads before prefetch is triggered. Must be non-negative.
+     */
+    public void setSequentialReadsBeforePrefetch(int seqReadsBeforePrefetch) {
+        A.ensure(seqReadsBeforePrefetch >= 0, "seqReadsBeforePrefetch >= 0");
+
+        this.seqReadsBeforePrefetch = seqReadsBeforePrefetch;
+    }
+
+    /**
+     * Gets the read/write buffer size (in bytes) used for IGFS stream operations.
+     *
+     * @return Read/write buffer size in bytes.
+     */
+    public int getStreamBufferSize() {
+        return bufSize;
+    }
+
+    /**
+     * Sets the read/write buffer size used for IGFS stream operations.
+     *
+     * @param bufSize Buffer size in bytes, or {@code 0} to restore the default value. Must be non-negative.
+     */
+    public void setStreamBufferSize(int bufSize) {
+        A.ensure(bufSize >= 0, "bufSize >= 0");
+
+        // Zero is a sentinel meaning "reset to default".
+        if (bufSize == 0)
+            this.bufSize = DFLT_BUF_SIZE;
+        else
+            this.bufSize = bufSize;
+    }
+
+    /**
+     * Gets number of file blocks buffered on local node before sending batch to remote node.
+     *
+     * @return Per node buffer size.
+     */
+    public int getPerNodeBatchSize() {
+        return perNodeBatchSize;
+    }
+
+    /**
+     * Sets number of file blocks buffered on local node before sending batch to remote node.
+     *
+     * @param perNodeBatchSize Per node buffer size.
+     */
+    public void setPerNodeBatchSize(int perNodeBatchSize) {
+        // NOTE(review): unlike setBlockSize/setPrefetchBlocks, no non-negativity check here — confirm intended.
+        this.perNodeBatchSize = perNodeBatchSize;
+    }
+
+    /**
+     * Gets number of batches that can be concurrently sent to remote node.
+     *
+     * @return Number of batches for each node.
+     */
+    public int getPerNodeParallelBatchCount() {
+        return perNodeParallelBatchCnt;
+    }
+
+    /**
+     * Sets number of file block batches that can be concurrently sent to remote node.
+     *
+     * @param perNodeParallelBatchCnt Per node parallel load operations.
+     */
+    public void setPerNodeParallelBatchCount(int perNodeParallelBatchCnt) {
+        this.perNodeParallelBatchCnt = perNodeParallelBatchCnt;
+    }
+
+    /**
+     * Gets map of IPC endpoint configuration properties. There are 2 different
+     * types of endpoint supported: {@code shared-memory}, and {@code TCP}.
+     * <p>
+     * The following configuration properties are supported for {@code shared-memory}
+     * endpoint:
+     * <ul>
+     *     <li>{@code type} - value is {@code shmem} to specify {@code shared-memory} approach.</li>
+     *     <li>{@code port} - endpoint port.</li>
+     *     <li>{@code size} - memory size allocated for single endpoint communication.</li>
+     *     <li>
+     *         {@code tokenDirectoryPath} - path, either absolute or relative to {@code IGNITE_HOME} to
+     *         store shared memory tokens.
+     *     </li>
+     * </ul>
+     * <p>
+     * The following configuration properties are supported for {@code TCP} approach:
+     * <ul>
+     *     <li>{@code type} - value is {@code tcp} to specify {@code TCP} approach.</li>
+     *     <li>{@code port} - endpoint bind port.</li>
+     *     <li>
+     *         {@code host} - endpoint bind host. If omitted '127.0.0.1' will be used.
+     *     </li>
+     * </ul>
+     * <p>
+     * Note that {@code shared-memory} approach is not supported on Windows environments.
+     * In case IGFS fails to bind to a particular port, further attempts will be performed every 3 seconds.
+     *
+     * @return Map of IPC endpoint configuration properties. In case the value is not set, defaults will be used. Default
+     * type for Windows is "tcp", for all other platforms - "shmem". Default port is {@link #DFLT_IPC_PORT}.
+     */
+    @Nullable public Map<String,String> getIpcEndpointConfiguration() {
+        return ipcEndpointCfg;
+    }
+
+    /**
+     * Sets IPC endpoint configuration to publish IGFS over.
+     *
+     * @param ipcEndpointCfg Map of IPC endpoint config properties. May be {@code null} to use defaults.
+     */
+    public void setIpcEndpointConfiguration(@Nullable Map<String,String> ipcEndpointCfg) {
+        this.ipcEndpointCfg = ipcEndpointCfg;
+    }
+
+    /**
+     * Get IPC endpoint enabled flag. In case it is set to {@code true} endpoint will be created and bound to specific
+     * port. Otherwise endpoint will not be created. Default value is {@link #DFLT_IPC_ENDPOINT_ENABLED}.
+     *
+     * @return {@code True} in case endpoint is enabled.
+     */
+    public boolean isIpcEndpointEnabled() {
+        return ipcEndpointEnabled;
+    }
+
+    /**
+     * Set IPC endpoint enabled flag. See {@link #isIpcEndpointEnabled()}.
+     *
+     * @param ipcEndpointEnabled IPC endpoint enabled flag.
+     */
+    public void setIpcEndpointEnabled(boolean ipcEndpointEnabled) {
+        this.ipcEndpointEnabled = ipcEndpointEnabled;
+    }
+
+    /**
+     * Gets port number for management endpoint. All IGFS nodes should have this port open
+     * for Visor Management Console to work with IGFS.
+     * <p>
+     * Default value is {@link #DFLT_MGMT_PORT}
+     *
+     * @return Port number or {@code -1} if management endpoint should be disabled.
+     */
+    public int getManagementPort() {
+        return mgmtPort;
+    }
+
+    /**
+     * Sets management endpoint port.
+     *
+     * @param mgmtPort Port number or {@code -1} to disable management endpoint.
+     */
+    public void setManagementPort(int mgmtPort) {
+        this.mgmtPort = mgmtPort;
+    }
+
+    /**
+     * Gets mode to specify how {@code IGFS} interacts with Hadoop file system, like {@code HDFS}.
+     * Secondary Hadoop file system is provided for pass-through, write-through, and read-through
+     * purposes.
+     * <p>
+     * Default mode is {@link org.apache.ignite.ignitefs.IgfsMode#DUAL_ASYNC}. If secondary Hadoop file system is
+     * not configured, this mode will work just like {@link org.apache.ignite.ignitefs.IgfsMode#PRIMARY} mode.
+     *
+     * @return Mode to specify how IGFS interacts with secondary HDFS file system.
+     */
+    public IgfsMode getDefaultMode() {
+        return dfltMode;
+    }
+
+    /**
+     * Sets {@code IGFS} mode to specify how it should interact with secondary
+     * Hadoop file system, like {@code HDFS}. Secondary Hadoop file system is provided
+     * for pass-through, write-through, and read-through purposes.
+     *
+     * @param dfltMode {@code IGFS} mode.
+     */
+    public void setDefaultMode(IgfsMode dfltMode) {
+        this.dfltMode = dfltMode;
+    }
+
+    /**
+     * Gets the secondary file system. Secondary file system is provided for pass-through, write-through,
+     * and read-through purposes.
+     *
+     * @return Secondary file system.
+     */
+    public Igfs getSecondaryFileSystem() {
+        return secondaryFs;
+    }
+
+    /**
+     * Sets the secondary file system. Secondary file system is provided for pass-through, write-through,
+     * and read-through purposes.
+     *
+     * @param fileSystem Secondary file system to use for pass-through, write-through, and read-through purposes.
+     */
+    public void setSecondaryFileSystem(Igfs fileSystem) {
+        secondaryFs = fileSystem;
+    }
+
+    /**
+     * Gets map of path prefixes to {@code IGFS} modes used for them.
+     * <p>
+     * If path doesn't correspond to any specified prefix or mappings are not provided, then
+     * {@link #getDefaultMode()} is used.
+     * <p>
+     * Several folders under {@code '/apache/ignite'} folder have predefined mappings which cannot be overridden.
+     * <ul>
+     * <li>{@code /apache/ignite/primary} and all it's sub-folders will always work in {@code PRIMARY} mode.</li>
+     * </ul>
+     * <p>
+     * And in case secondary file system URI is provided:
+     * <ul>
+     * <li>{@code /apache/ignite/proxy} and all it's sub-folders will always work in {@code PROXY} mode.</li>
+     * <li>{@code /apache/ignite/sync} and all it's sub-folders will always work in {@code DUAL_SYNC} mode.</li>
+     * <li>{@code /apache/ignite/async} and all it's sub-folders will always work in {@code DUAL_ASYNC} mode.</li>
+     * </ul>
+     *
+     * @return Map of paths to {@code IGFS} modes.
+     */
+    @Nullable public Map<String, IgfsMode> getPathModes() {
+        return pathModes;
+    }
+
+    /**
+     * Sets map of path prefixes to {@code IGFS} modes used for them.
+     * <p>
+     * If path doesn't correspond to any specified prefix or mappings are not provided, then
+     * {@link #getDefaultMode()} is used.
+     *
+     * @param pathModes Map of paths to {@code IGFS} modes.
+     */
+    public void setPathModes(Map<String, IgfsMode> pathModes) {
+        this.pathModes = pathModes;
+    }
+
+    /**
+     * Gets the length of file chunk to send before delaying the fragmentizer.
+     *
+     * @return File chunk length in bytes.
+     */
+    public long getFragmentizerThrottlingBlockLength() {
+        return fragmentizerThrottlingBlockLen;
+    }
+
+    /**
+     * Sets length of file chunk to transmit before throttling is delayed.
+     *
+     * @param fragmentizerThrottlingBlockLen Block length in bytes.
+     */
+    public void setFragmentizerThrottlingBlockLength(long fragmentizerThrottlingBlockLen) {
+        this.fragmentizerThrottlingBlockLen = fragmentizerThrottlingBlockLen;
+    }
+
+    /**
+     * Gets throttle delay for fragmentizer.
+     *
+     * @return Throttle delay in milliseconds.
+     */
+    public long getFragmentizerThrottlingDelay() {
+        return fragmentizerThrottlingDelay;
+    }
+
+    /**
+     * Sets delay in milliseconds for which fragmentizer is paused.
+     *
+     * @param fragmentizerThrottlingDelay Delay in milliseconds.
+     */
+    public void setFragmentizerThrottlingDelay(long fragmentizerThrottlingDelay) {
+        this.fragmentizerThrottlingDelay = fragmentizerThrottlingDelay;
+    }
+
+    /**
+     * Gets number of files that can be processed by fragmentizer concurrently.
+     *
+     * @return Number of files to process concurrently.
+     */
+    public int getFragmentizerConcurrentFiles() {
+        return fragmentizerConcurrentFiles;
+    }
+
+    /**
+     * Sets number of files to process concurrently by fragmentizer.
+     *
+     * @param fragmentizerConcurrentFiles Number of files to process concurrently.
+     */
+    public void setFragmentizerConcurrentFiles(int fragmentizerConcurrentFiles) {
+        this.fragmentizerConcurrentFiles = fragmentizerConcurrentFiles;
+    }
+
+    /**
+     * Gets amount of local memory (in % of local IGFS max space size) available for local writes
+     * during file creation.
+     * <p>
+     * If current IGFS space size is less than {@code fragmentizerLocalWritesRatio * maxSpaceSize},
+     * then file blocks will be written to the local node first and then asynchronously distributed
+     * among cluster nodes (fragmentized).
+     * <p>
+     * Default value is {@link #DFLT_FRAGMENTIZER_LOCAL_WRITES_RATIO}.
+     *
+     * @return Ratio for local writes space.
+     */
+    public float getFragmentizerLocalWritesRatio() {
+        return fragmentizerLocWritesRatio;
+    }
+
+    /**
+     * Sets ratio for space available for local file writes.
+     *
+     * @param fragmentizerLocWritesRatio Ratio for local file writes.
+     * @see #getFragmentizerLocalWritesRatio()
+     */
+    public void setFragmentizerLocalWritesRatio(float fragmentizerLocWritesRatio) {
+        this.fragmentizerLocWritesRatio = fragmentizerLocWritesRatio;
+    }
+
+    /**
+     * Gets flag indicating whether IGFS fragmentizer is enabled. If fragmentizer is disabled, files will be
+     * written in distributed fashion.
+     *
+     * @return Flag indicating whether fragmentizer is enabled.
+     */
+    public boolean isFragmentizerEnabled() {
+        return fragmentizerEnabled;
+    }
+
+    /**
+     * Sets property indicating whether fragmentizer is enabled.
+     *
+     * @param fragmentizerEnabled {@code True} if fragmentizer is enabled.
+     */
+    public void setFragmentizerEnabled(boolean fragmentizerEnabled) {
+        this.fragmentizerEnabled = fragmentizerEnabled;
+    }
+
+    /**
+     * Get maximum space available for data cache to store file system entries.
+     *
+     * @return Maximum space available for data cache, in bytes.
+     */
+    public long getMaxSpaceSize() {
+        return maxSpace;
+    }
+
+    /**
+     * Set maximum space in bytes available in data cache.
+     *
+     * @param maxSpace Maximum space available in data cache, in bytes.
+     */
+    public void setMaxSpaceSize(long maxSpace) {
+        this.maxSpace = maxSpace;
+    }
+
+    /**
+     * Gets maximum timeout awaiting for trash purging in case data cache oversize is detected.
+     *
+     * @return Maximum timeout awaiting for trash purging in case data cache oversize is detected.
+     */
+    public long getTrashPurgeTimeout() {
+        return trashPurgeTimeout;
+    }
+
+    /**
+     * Sets maximum timeout awaiting for trash purging in case data cache oversize is detected.
+     *
+     * @param trashPurgeTimeout Maximum timeout awaiting for trash purging in case data cache oversize is detected.
+     */
+    public void setTrashPurgeTimeout(long trashPurgeTimeout) {
+        this.trashPurgeTimeout = trashPurgeTimeout;
+    }
+
+    /**
+     * Get DUAL mode put operation executor service. This executor service will process cache PUT requests for
+     * data which came from the secondary file system and about to be written to IGFS data cache.
+     * In case no executor service is provided, default one will be created with maximum amount of threads equals
+     * to amount of processor cores.
+     *
+     * @return Get DUAL mode put operation executor service.
+     */
+    @Nullable public ExecutorService getDualModePutExecutorService() {
+        return dualModePutExec;
+    }
+
+    /**
+     * Set DUAL mode put operations executor service.
+     *
+     * @param dualModePutExec Dual mode put operations executor service.
+     */
+    public void setDualModePutExecutorService(ExecutorService dualModePutExec) {
+        this.dualModePutExec = dualModePutExec;
+    }
+
+    /**
+     * Get DUAL mode put operation executor service shutdown flag.
+     *
+     * @return DUAL mode put operation executor service shutdown flag.
+     */
+    public boolean getDualModePutExecutorServiceShutdown() {
+        return dualModePutExecShutdown;
+    }
+
+    /**
+     * Set DUAL mode put operations executor service shutdown flag.
+     *
+     * @param dualModePutExecShutdown Dual mode put operations executor service shutdown flag.
+     */
+    public void setDualModePutExecutorServiceShutdown(boolean dualModePutExecShutdown) {
+        this.dualModePutExecShutdown = dualModePutExecShutdown;
+    }
+
+    /**
+     * Get maximum amount of pending data read from the secondary file system and waiting to be written to data
+     * cache. {@code 0} or negative value stands for unlimited size.
+     * <p>
+     * By default this value is set to {@code 0}. It is recommended to set positive value in case your
+     * application performs frequent reads of large amount of data from the secondary file system in order to
+     * avoid issues with increasing GC pauses or out-of-memory error.
+     *
+     * @return Maximum amount of pending data read from the secondary file system.
+     */
+    public long getDualModeMaxPendingPutsSize() {
+        return dualModeMaxPendingPutsSize;
+    }
+
+    /**
+     * Set maximum amount of data in pending put operations.
+     *
+     * @param dualModeMaxPendingPutsSize Maximum amount of data in pending put operations. {@code 0} or negative
+     *      value stands for unlimited size.
+     */
+    public void setDualModeMaxPendingPutsSize(long dualModeMaxPendingPutsSize) {
+        this.dualModeMaxPendingPutsSize = dualModeMaxPendingPutsSize;
+    }
+
+    /**
+     * Get maximum default range size of a file being split during IGFS task execution. When IGFS task is about to
+     * be executed, it requests file block locations first. Each location is defined as {@link org.apache.ignite.ignitefs.mapreduce.IgfsFileRange} which
+     * has length. In case this parameter is set to positive value, then IGFS will split single file range into smaller
+     * ranges with length not greater that this parameter. The only exception to this case is when maximum task range
+     * length is smaller than file block size. In this case maximum task range size will be overridden and set to file
+     * block size.
+     * <p>
+     * Note that this parameter is applied when task is split into jobs before {@link org.apache.ignite.ignitefs.mapreduce.IgfsRecordResolver} is
+     * applied. Therefore, final file ranges being assigned to particular jobs could be greater than value of this
+     * parameter depending on file data layout and selected resolver type.
+     * <p>
+     * Setting this parameter might be useful when file is highly colocated and have very long consequent data chunks
+     * so that task execution suffers from insufficient parallelism. E.g., in case you have one IGFS node in topology
+     * and want to process 1Gb file, then only single range of length 1Gb will be returned. This will result in
+     * a single job which will be processed in one thread. But in case you provide this configuration parameter and set
+     * maximum range length to 16Mb, then 64 ranges will be returned resulting in 64 jobs which could be executed in
+     * parallel.
+     * <p>
+     * Note that some {@code GridGgfs.execute()} methods can override value of this parameter.
+     * <p>
+     * In case value of this parameter is set to {@code 0} or negative value, it is simply ignored. Default value is
+     * {@code 0}.
+     *
+     * @return Maximum range size of a file being split during IGFS task execution.
+     */
+    public long getMaximumTaskRangeLength() {
+        return maxTaskRangeLen;
+    }
+
+    /**
+     * Set maximum default range size of a file being split during IGFS task execution.
+     * See {@link #getMaximumTaskRangeLength()} for more details.
+     *
+     * @param maxTaskRangeLen Maximum default range size of a file being split during IGFS task execution.
+     */
+    public void setMaximumTaskRangeLength(long maxTaskRangeLen) {
+        this.maxTaskRangeLen = maxTaskRangeLen;
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(IgfsConfiguration.class, this);
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/88bf1443/modules/core/src/main/java/org/apache/ignite/configuration/IgniteConfiguration.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/configuration/IgniteConfiguration.java b/modules/core/src/main/java/org/apache/ignite/configuration/IgniteConfiguration.java
index 95e0a0f..50e0c8a 100644
--- a/modules/core/src/main/java/org/apache/ignite/configuration/IgniteConfiguration.java
+++ b/modules/core/src/main/java/org/apache/ignite/configuration/IgniteConfiguration.java
@@ -393,7 +393,7 @@ public class IgniteConfiguration {
     private Map<IgnitePredicate<? extends Event>, int[]> lsnrs;
 
     /** GGFS configuration. */
-    private IgniteFsConfiguration[] ggfsCfg;
+    private IgfsConfiguration[] ggfsCfg;
 
     /** Streamer configuration. */
     private StreamerConfiguration[] streamerCfg;
@@ -2142,7 +2142,7 @@ public class IgniteConfiguration {
      *
      * @return GGFS configurations.
      */
-    public IgniteFsConfiguration[] getGgfsConfiguration() {
+    public IgfsConfiguration[] getGgfsConfiguration() {
         return ggfsCfg;
     }
 
@@ -2151,7 +2151,7 @@ public class IgniteConfiguration {
      *
      * @param ggfsCfg GGFS configurations.
      */
-    public void setGgfsConfiguration(IgniteFsConfiguration... ggfsCfg) {
+    public void setGgfsConfiguration(IgfsConfiguration... ggfsCfg) {
         this.ggfsCfg = ggfsCfg;
     }
 


Mime
View raw message