flink-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From ches...@apache.org
Subject [5/7] flink git commit: [FLINK-6821] [runtime] Activate checkstyle for runtime/fs
Date Tue, 04 Jul 2017 12:35:28 GMT
[FLINK-6821] [runtime] Activate checkstyle for runtime/fs

This closes #4063.


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/c9f659e0
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/c9f659e0
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/c9f659e0

Branch: refs/heads/master
Commit: c9f659e046a7b42e79d72df74bead5809ab2fe46
Parents: 31ad802
Author: zentol <chesnay@apache.org>
Authored: Fri Jun 2 21:16:52 2017 +0200
Committer: zentol <chesnay@apache.org>
Committed: Tue Jul 4 12:33:31 2017 +0200

----------------------------------------------------------------------
 flink-runtime/pom.xml                           |  1 -
 .../runtime/fs/hdfs/HadoopBlockLocation.java    | 16 ++---
 .../runtime/fs/hdfs/HadoopDataInputStream.java  | 19 +++--
 .../runtime/fs/hdfs/HadoopDataOutputStream.java |  8 ++-
 .../flink/runtime/fs/hdfs/HadoopFileStatus.java |  2 +-
 .../flink/runtime/fs/hdfs/HadoopFileSystem.java | 75 +++++++++-----------
 .../flink/runtime/fs/maprfs/MapRFileSystem.java | 29 ++++----
 .../fs/hdfs/HadoopDataInputStreamTest.java      |  3 +
 8 files changed, 73 insertions(+), 80 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/flink/blob/c9f659e0/flink-runtime/pom.xml
----------------------------------------------------------------------
diff --git a/flink-runtime/pom.xml b/flink-runtime/pom.xml
index 80f95a5..1fff305 100644
--- a/flink-runtime/pom.xml
+++ b/flink-runtime/pom.xml
@@ -436,7 +436,6 @@ under the License.
 						**/runtime/deployment/**,
 						**/runtime/execution/**,
 						**/runtime/executiongraph/**,
-						**/runtime/fs/**,
 						**/runtime/heartbeat/**,
 						**/runtime/highavailability/**,
 						**/runtime/instance/**,

http://git-wip-us.apache.org/repos/asf/flink/blob/c9f659e0/flink-runtime/src/main/java/org/apache/flink/runtime/fs/hdfs/HadoopBlockLocation.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/fs/hdfs/HadoopBlockLocation.java
b/flink-runtime/src/main/java/org/apache/flink/runtime/fs/hdfs/HadoopBlockLocation.java
index a1cc72c..1484c95 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/fs/hdfs/HadoopBlockLocation.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/fs/hdfs/HadoopBlockLocation.java
@@ -16,19 +16,17 @@
  * limitations under the License.
  */
 
-
 package org.apache.flink.runtime.fs.hdfs;
 
+import org.apache.flink.core.fs.BlockLocation;
+
 import java.io.IOException;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
-import org.apache.flink.core.fs.BlockLocation;
-
 /**
  * Implementation of the {@link BlockLocation} interface for the
  * Hadoop Distributed File System.
- * 
  */
 public final class HadoopBlockLocation implements BlockLocation {
 
@@ -53,8 +51,8 @@ public final class HadoopBlockLocation implements BlockLocation {
 	private String[] hostnames;
 
 	/**
-	 * Creates a new block location
-	 * 
+	 * Creates a new block location.
+	 *
 	 * @param blockLocation
 	 *        the original HDFS block location
 	 */
@@ -63,7 +61,6 @@ public final class HadoopBlockLocation implements BlockLocation {
 		this.blockLocation = blockLocation;
 	}
 
-
 	@Override
 	public String[] getHosts() throws IOException {
 
@@ -88,7 +85,7 @@ public final class HadoopBlockLocation implements BlockLocation {
 
 	/**
 	 * Looks for a domain suffix in a FQDN and strips it if present.
-	 * 
+	 *
 	 * @param originalHostname
 	 *        the original hostname, possibly an FQDN
 	 * @return the stripped hostname without the domain suffix
@@ -114,21 +111,18 @@ public final class HadoopBlockLocation implements BlockLocation {
 		return originalHostname.substring(0, index);
 	}
 
-
 	@Override
 	public long getLength() {
 
 		return this.blockLocation.getLength();
 	}
 
-
 	@Override
 	public long getOffset() {
 
 		return this.blockLocation.getOffset();
 	}
 
-
 	@Override
 	public int compareTo(final BlockLocation o) {
 

http://git-wip-us.apache.org/repos/asf/flink/blob/c9f659e0/flink-runtime/src/main/java/org/apache/flink/runtime/fs/hdfs/HadoopDataInputStream.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/fs/hdfs/HadoopDataInputStream.java
b/flink-runtime/src/main/java/org/apache/flink/runtime/fs/hdfs/HadoopDataInputStream.java
index 3cc841e..da50c4c 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/fs/hdfs/HadoopDataInputStream.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/fs/hdfs/HadoopDataInputStream.java
@@ -27,19 +27,19 @@ import java.io.IOException;
 import static org.apache.flink.util.Preconditions.checkNotNull;
 
 /**
- * Concrete implementation of the {@link FSDataInputStream} for the Hadoop's input streams.
+ * Concrete implementation of the {@link FSDataInputStream} for Hadoop's input streams.
  * This supports all file systems supported by Hadoop, such as HDFS and S3 (S3a/S3n).
  */
 public final class HadoopDataInputStream extends FSDataInputStream {
 
 	/**
 	 * Minimum amount of bytes to skip forward before we issue a seek instead of discarding
read.
-	 * <p>
-	 * The current value is just a magic number. In the long run, this value could become configurable,
but for now it
+	 *
+	 * <p>The current value is just a magic number. In the long run, this value could
become configurable, but for now it
 	 * is a conservative, relatively small value that should bring safe improvements for small
skips (e.g. in reading
 	 * meta data), that would hurt the most with frequent seeks.
-	 * <p>
-	 * The optimal value depends on the DFS implementation and configuration plus the underlying
filesystem.
+	 *
+	 * <p>The optimal value depends on the DFS implementation and configuration plus the
underlying filesystem.
 	 * For now, this number is chosen "big enough" to provide improvements for smaller seeks,
and "small enough" to
 	 * avoid disadvantages over real seeks. While the minimum should be the page size, a true
optimum per system would
 	 * be the amounts of bytes the can be consumed sequentially within the seektime. Unfortunately,
seektime is not
@@ -47,11 +47,11 @@ public final class HadoopDataInputStream extends FSDataInputStream {
 	 */
 	public static final int MIN_SKIP_BYTES = 1024 * 1024;
 
-	/** The internal stream */
+	/** The internal stream. */
 	private final org.apache.hadoop.fs.FSDataInputStream fsDataInputStream;
 
 	/**
-	 * Creates a new data input stream from the given Hadoop input stream
+	 * Creates a new data input stream from the given Hadoop input stream.
 	 *
 	 * @param fsDataInputStream The Hadoop input stream
 	 */
@@ -59,7 +59,6 @@ public final class HadoopDataInputStream extends FSDataInputStream {
 		this.fsDataInputStream = checkNotNull(fsDataInputStream);
 	}
 
-
 	@Override
 	public void seek(long seekPos) throws IOException {
 		// We do some optimizations to avoid that some implementations of distributed FS perform
@@ -116,8 +115,8 @@ public final class HadoopDataInputStream extends FSDataInputStream {
 	/**
 	 * Positions the stream to the given location. In contrast to {@link #seek(long)}, this
method will
 	 * always issue a "seek" command to the dfs and may not replace it by {@link #skip(long)}
for small seeks.
-	 * <p>
-	 * Notice that the underlying DFS implementation can still decide to do skip instead of
seek.
+	 *
+	 * <p>Notice that the underlying DFS implementation can still decide to do skip instead
of seek.
 	 *
 	 * @param seekPos the position to seek to.
 	 * @throws IOException

http://git-wip-us.apache.org/repos/asf/flink/blob/c9f659e0/flink-runtime/src/main/java/org/apache/flink/runtime/fs/hdfs/HadoopDataOutputStream.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/fs/hdfs/HadoopDataOutputStream.java
b/flink-runtime/src/main/java/org/apache/flink/runtime/fs/hdfs/HadoopDataOutputStream.java
index 8787181..1b8d1a3 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/fs/hdfs/HadoopDataOutputStream.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/fs/hdfs/HadoopDataOutputStream.java
@@ -18,10 +18,14 @@
 
 package org.apache.flink.runtime.fs.hdfs;
 
-import java.io.IOException;
-
 import org.apache.flink.core.fs.FSDataOutputStream;
 
+import java.io.IOException;
+
+/**
+ * Concrete implementation of the {@link FSDataOutputStream} for Hadoop's output streams.
+ * This supports all file systems supported by Hadoop, such as HDFS and S3 (S3a/S3n).
+ */
 public class HadoopDataOutputStream extends FSDataOutputStream {
 
 	private final org.apache.hadoop.fs.FSDataOutputStream fdos;

http://git-wip-us.apache.org/repos/asf/flink/blob/c9f659e0/flink-runtime/src/main/java/org/apache/flink/runtime/fs/hdfs/HadoopFileStatus.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/fs/hdfs/HadoopFileStatus.java
b/flink-runtime/src/main/java/org/apache/flink/runtime/fs/hdfs/HadoopFileStatus.java
index 519791e..17bb334 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/fs/hdfs/HadoopFileStatus.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/fs/hdfs/HadoopFileStatus.java
@@ -31,7 +31,7 @@ public final class HadoopFileStatus implements FileStatus {
 
 	/**
 	 * Creates a new file status from a HDFS file status.
-	 * 
+	 *
 	 * @param fileStatus
 	 *        the HDFS file status
 	 */

http://git-wip-us.apache.org/repos/asf/flink/blob/c9f659e0/flink-runtime/src/main/java/org/apache/flink/runtime/fs/hdfs/HadoopFileSystem.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/fs/hdfs/HadoopFileSystem.java
b/flink-runtime/src/main/java/org/apache/flink/runtime/fs/hdfs/HadoopFileSystem.java
index 1371d21..f47423f 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/fs/hdfs/HadoopFileSystem.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/fs/hdfs/HadoopFileSystem.java
@@ -15,6 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.runtime.fs.hdfs;
 
 import org.apache.flink.configuration.ConfigConstants;
@@ -25,6 +26,7 @@ import org.apache.flink.core.fs.FileSystem;
 import org.apache.flink.core.fs.HadoopFileSystemWrapper;
 import org.apache.flink.core.fs.Path;
 import org.apache.flink.util.InstantiationUtil;
+
 import org.apache.hadoop.conf.Configuration;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -41,16 +43,16 @@ import static org.apache.flink.util.Preconditions.checkNotNull;
  * Concrete implementation of the {@link FileSystem} base class for the Hadoop File System.
The
  * class is a wrapper class which encapsulated the original Hadoop HDFS API.
  *
- * If no file system class is specified, the wrapper will automatically load the Hadoop
+ * <p>If no file system class is specified, the wrapper will automatically load the
Hadoop
  * distributed file system (HDFS).
  *
  */
 public final class HadoopFileSystem extends FileSystem implements HadoopFileSystemWrapper
{
-	
+
 	private static final Logger LOG = LoggerFactory.getLogger(HadoopFileSystem.class);
-	
+
 	private static final String DEFAULT_HDFS_CLASS = "org.apache.hadoop.hdfs.DistributedFileSystem";
-	
+
 	/**
 	 * Configuration value name for the DFS implementation name. Usually not specified in hadoop
configurations.
 	 */
@@ -64,7 +66,7 @@ public final class HadoopFileSystem extends FileSystem implements HadoopFileSyst
 	/**
 	 * Creates a new DistributedFileSystem object to access HDFS, based on a class name
 	 * and picking up the configuration from the class path or the Flink configuration.
-	 * 
+	 *
 	 * @throws IOException
 	 *         throw if the required HDFS classes cannot be instantiated
 	 */
@@ -72,7 +74,7 @@ public final class HadoopFileSystem extends FileSystem implements HadoopFileSyst
 		// Create new Hadoop configuration object
 		this.conf = getHadoopConfiguration();
 
-		if(fsClass == null) {
+		if (fsClass == null) {
 			fsClass = getDefaultHDFSClass();
 		}
 
@@ -126,8 +128,7 @@ public final class HadoopFileSystem extends FileSystem implements HadoopFileSyst
 		}
 
 		// fall back to an older Hadoop version
-		if (fsClass == null)
-		{
+		if (fsClass == null) {
 			// first of all, check for a user-defined hdfs class
 			if (LOG.isDebugEnabled()) {
 				LOG.debug("Falling back to loading HDFS class old Hadoop style. Looking for HDFS class
configuration entry '{}'.",
@@ -136,13 +137,12 @@ public final class HadoopFileSystem extends FileSystem implements HadoopFileSyst
 
 			Class<?> classFromConfig = conf.getClass(HDFS_IMPLEMENTATION_KEY, null);
 
-			if (classFromConfig != null)
-			{
+			if (classFromConfig != null) {
 				if (org.apache.hadoop.fs.FileSystem.class.isAssignableFrom(classFromConfig)) {
 					fsClass = classFromConfig.asSubclass(org.apache.hadoop.fs.FileSystem.class);
 
 					if (LOG.isDebugEnabled()) {
-						LOG.debug("Loaded HDFS class '{}' as specified in configuration.", fsClass.getName()
);
+						LOG.debug("Loaded HDFS class '{}' as specified in configuration.", fsClass.getName());
 					}
 				}
 				else {
@@ -187,7 +187,7 @@ public final class HadoopFileSystem extends FileSystem implements HadoopFileSyst
 	}
 
 	/**
-	 * Returns a new Hadoop Configuration object using the path to the hadoop conf configured

+	 * Returns a new Hadoop Configuration object using the path to the hadoop conf configured
 	 * in the main configuration (flink-conf.yaml).
 	 * This method is public because its being used in the HadoopDataSource.
 	 */
@@ -215,15 +215,15 @@ public final class HadoopFileSystem extends FileSystem implements HadoopFileSyst
 		} else {
 			LOG.trace("{} configuration key for hdfs-site configuration file not set", ConfigConstants.HDFS_SITE_CONFIG);
 		}
-		
+
 		// 2. Approach environment variables
-		String[] possibleHadoopConfPaths = new String[4]; 
+		String[] possibleHadoopConfPaths = new String[4];
 		possibleHadoopConfPaths[0] = flinkConfiguration.getString(ConfigConstants.PATH_HADOOP_CONFIG,
null);
 		possibleHadoopConfPaths[1] = System.getenv("HADOOP_CONF_DIR");
-		
+
 		if (System.getenv("HADOOP_HOME") != null) {
-			possibleHadoopConfPaths[2] = System.getenv("HADOOP_HOME")+"/conf";
-			possibleHadoopConfPaths[3] = System.getenv("HADOOP_HOME")+"/etc/hadoop"; // hadoop 2.2
+			possibleHadoopConfPaths[2] = System.getenv("HADOOP_HOME") + "/conf";
+			possibleHadoopConfPaths[3] = System.getenv("HADOOP_HOME") + "/etc/hadoop"; // hadoop 2.2
 		}
 
 		for (String possibleHadoopConfPath : possibleHadoopConfPaths) {
@@ -245,10 +245,9 @@ public final class HadoopFileSystem extends FileSystem implements HadoopFileSyst
 		}
 		return retConf;
 	}
-	
+
 	private org.apache.hadoop.fs.FileSystem instantiateFileSystem(Class<? extends org.apache.hadoop.fs.FileSystem>
fsClass)
-		throws IOException
-	{
+		throws IOException {
 		try {
 			return fsClass.newInstance();
 		}
@@ -266,7 +265,6 @@ public final class HadoopFileSystem extends FileSystem implements HadoopFileSyst
 		}
 	}
 
-
 	@Override
 	public Path getWorkingDirectory() {
 		return new Path(this.fs.getWorkingDirectory().toUri());
@@ -288,30 +286,30 @@ public final class HadoopFileSystem extends FileSystem implements HadoopFileSyst
 	public org.apache.hadoop.fs.FileSystem getHadoopFileSystem() {
 		return this.fs;
 	}
-	
+
 	@Override
 	public void initialize(URI path) throws IOException {
-		
+
 		// If the authority is not part of the path, we initialize with the fs.defaultFS entry.
 		if (path.getAuthority() == null) {
-			
+
 			String configEntry = this.conf.get("fs.defaultFS", null);
 			if (configEntry == null) {
 				// fs.default.name deprecated as of hadoop 2.2.0 http://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/DeprecatedProperties.html
 				configEntry = this.conf.get("fs.default.name", null);
 			}
-			
+
 			if (LOG.isDebugEnabled()) {
 				LOG.debug("fs.defaultFS is set to {}", configEntry);
 			}
-			
+
 			if (configEntry == null) {
 				throw new IOException(getMissingAuthorityErrorPrefix(path) + "Either no default file
system (hdfs) configuration was registered, " +
 						"or that configuration did not contain an entry for the default file system (usually
'fs.defaultFS').");
 			} else {
 				try {
 					URI initURI = URI.create(configEntry);
-					
+
 					if (initURI.getAuthority() == null) {
 						throw new IOException(getMissingAuthorityErrorPrefix(path) + "Either no default file
system was registered, " +
 								"or the provided configuration contains no valid authority component (fs.default.name
or fs.defaultFS) " +
@@ -330,7 +328,7 @@ public final class HadoopFileSystem extends FileSystem implements HadoopFileSyst
 					throw new IOException(getMissingAuthorityErrorPrefix(path) +
 							"The configuration contains an invalid file system default name (fs.default.name or
fs.defaultFS): " + configEntry);
 				}
-			} 
+			}
 		}
 		else {
 			// Initialize file system
@@ -341,11 +339,11 @@ public final class HadoopFileSystem extends FileSystem implements HadoopFileSyst
 				String message = "The (HDFS NameNode) host at '" + path.getAuthority()
 						+ "', specified by file path '" + path.toString() + "', cannot be resolved"
 						+ (e.getMessage() != null ? ": " + e.getMessage() : ".");
-				
+
 				if (path.getPort() == -1) {
 					message += " Hint: Have you forgotten a slash? (correct URI would be 'hdfs:///" + path.getAuthority()
+ path.getPath() + "' ?)";
 				}
-				
+
 				throw new IOException(message, e);
 			}
 			catch (Exception e) {
@@ -355,14 +353,13 @@ public final class HadoopFileSystem extends FileSystem implements HadoopFileSyst
 			}
 		}
 	}
-	
+
 	private static String getMissingAuthorityErrorPrefix(URI path) {
 		return "The given HDFS file URI (" + path.toString() + ") did not describe the HDFS NameNode."
+
-				" The attempt to use a default HDFS configuration, as specified in the '" + ConfigConstants.HDFS_DEFAULT_CONFIG
+ "' or '" + 
+				" The attempt to use a default HDFS configuration, as specified in the '" + ConfigConstants.HDFS_DEFAULT_CONFIG
+ "' or '" +
 				ConfigConstants.HDFS_SITE_CONFIG + "' config parameter failed due to the following problem:
";
 	}
 
-
 	@Override
 	public FileStatus getFileStatus(final Path f) throws IOException {
 		org.apache.hadoop.fs.FileStatus status = this.fs.getFileStatus(new org.apache.hadoop.fs.Path(f.toString()));
@@ -371,8 +368,7 @@ public final class HadoopFileSystem extends FileSystem implements HadoopFileSyst
 
 	@Override
 	public BlockLocation[] getFileBlockLocations(final FileStatus file, final long start, final
long len)
-	throws IOException
-	{
+			throws IOException {
 		if (!(file instanceof HadoopFileStatus)) {
 			throw new IOException("file is not an instance of DistributedFileStatus");
 		}
@@ -407,15 +403,12 @@ public final class HadoopFileSystem extends FileSystem implements HadoopFileSyst
 
 	@Override
 	public HadoopDataOutputStream create(final Path f, final boolean overwrite, final int bufferSize,
-			final short replication, final long blockSize)
-	throws IOException
-	{
+			final short replication, final long blockSize) throws IOException {
 		final org.apache.hadoop.fs.FSDataOutputStream fdos = this.fs.create(
 			new org.apache.hadoop.fs.Path(f.toString()), overwrite, bufferSize, replication, blockSize);
 		return new HadoopDataOutputStream(fdos);
 	}
 
-
 	@Override
 	public HadoopDataOutputStream create(final Path f, final WriteMode overwrite) throws IOException
{
 		final org.apache.hadoop.fs.FSDataOutputStream fsDataOutputStream = this.fs
@@ -437,7 +430,7 @@ public final class HadoopFileSystem extends FileSystem implements HadoopFileSyst
 		for (int i = 0; i < files.length; i++) {
 			files[i] = new HadoopFileStatus(hadoopFiles[i]);
 		}
-		
+
 		return files;
 	}
 
@@ -476,7 +469,7 @@ public final class HadoopFileSystem extends FileSystem implements HadoopFileSyst
 //		}
 		clazz = hadoopConf.getClass("fs." + scheme + ".impl", null, org.apache.hadoop.fs.FileSystem.class);
 
-		if(clazz != null && LOG.isDebugEnabled()) {
+		if (clazz != null && LOG.isDebugEnabled()) {
 			LOG.debug("Flink supports {} with the Hadoop file system wrapper, impl {}", scheme, clazz);
 		}
 		return clazz;

http://git-wip-us.apache.org/repos/asf/flink/blob/c9f659e0/flink-runtime/src/main/java/org/apache/flink/runtime/fs/maprfs/MapRFileSystem.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/fs/maprfs/MapRFileSystem.java
b/flink-runtime/src/main/java/org/apache/flink/runtime/fs/maprfs/MapRFileSystem.java
index 57eea6f..275e492 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/fs/maprfs/MapRFileSystem.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/fs/maprfs/MapRFileSystem.java
@@ -18,17 +18,6 @@
 
 package org.apache.flink.runtime.fs.maprfs;
 
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.FileReader;
-import java.io.IOException;
-import java.lang.reflect.Constructor;
-import java.net.URI;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.apache.flink.core.fs.BlockLocation;
 import org.apache.flink.core.fs.FSDataInputStream;
 import org.apache.flink.core.fs.FSDataOutputStream;
@@ -40,6 +29,18 @@ import org.apache.flink.runtime.fs.hdfs.HadoopDataInputStream;
 import org.apache.flink.runtime.fs.hdfs.HadoopDataOutputStream;
 import org.apache.flink.runtime.fs.hdfs.HadoopFileStatus;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileReader;
+import java.io.IOException;
+import java.lang.reflect.Constructor;
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.List;
+
 /**
  * Concrete implementation of the {@link FileSystem} base class for the MapR
  * file system. The class contains MapR specific code to initialize the
@@ -94,7 +95,7 @@ public final class MapRFileSystem extends FileSystem {
 
 	/**
 	 * Creates a new MapRFileSystem object to access the MapR file system.
-	 * 
+	 *
 	 * @throws IOException
 	 *             throw if the required MapR classes cannot be found
 	 */
@@ -180,8 +181,8 @@ public final class MapRFileSystem extends FileSystem {
 	}
 
 	/**
-	 * Retrieves the CLDB locations for the given MapR cluster name
-	 * 
+	 * Retrieves the CLDB locations for the given MapR cluster name.
+	 *
 	 * @param authority
 	 *            the name of the MapR cluster
 	 * @return a list of CLDB locations

http://git-wip-us.apache.org/repos/asf/flink/blob/c9f659e0/flink-runtime/src/test/java/org/apache/flink/runtime/fs/hdfs/HadoopDataInputStreamTest.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/test/java/org/apache/flink/runtime/fs/hdfs/HadoopDataInputStreamTest.java
b/flink-runtime/src/test/java/org/apache/flink/runtime/fs/hdfs/HadoopDataInputStreamTest.java
index 58de3db..21c18bc 100644
--- a/flink-runtime/src/test/java/org/apache/flink/runtime/fs/hdfs/HadoopDataInputStreamTest.java
+++ b/flink-runtime/src/test/java/org/apache/flink/runtime/fs/hdfs/HadoopDataInputStreamTest.java
@@ -35,6 +35,9 @@ import static org.mockito.Mockito.reset;
 import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.verify;
 
+/**
+ * Tests for the {@link HadoopDataInputStream}.
+ */
 public class HadoopDataInputStreamTest {
 
 	private FSDataInputStream verifyInputStream;


Mime
View raw message