hadoop-common-commits mailing list archives

From: w...@apache.org
Subject: hadoop git commit: HDFS-8941. DistributedFileSystem listCorruptFileBlocks API should resolve relative path. Contributed by Rakesh R.
Date: Fri, 09 Oct 2015 18:57:12 GMT
Repository: hadoop
Updated Branches:
  refs/heads/trunk 357b1fd08 -> c32614f41


HDFS-8941. DistributedFileSystem listCorruptFileBlocks API should resolve relative path. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c32614f4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c32614f4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c32614f4

Branch: refs/heads/trunk
Commit: c32614f410fb62a7179abfefbab42a05415a3066
Parents: 357b1fd
Author: Andrew Wang <wang@apache.org>
Authored: Fri Oct 9 11:57:03 2015 -0700
Committer: Andrew Wang <wang@apache.org>
Committed: Fri Oct 9 11:57:03 2015 -0700

----------------------------------------------------------------------
 .../hadoop/hdfs/DistributedFileSystem.java      | 17 ++++-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 +
 .../namenode/TestListCorruptFileBlocks.java     | 68 ++++++++++++++++++++
 3 files changed, 86 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c32614f4/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 512c9c1..8ed892c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -1112,9 +1112,22 @@ public class DistributedFileSystem extends FileSystem {
   }
 
   @Override
-  public RemoteIterator<Path> listCorruptFileBlocks(Path path)
+  public RemoteIterator<Path> listCorruptFileBlocks(final Path path)
       throws IOException {
-    return new CorruptFileBlockIterator(dfs, path);
+    Path absF = fixRelativePart(path);
+    return new FileSystemLinkResolver<RemoteIterator<Path>>() {
+      @Override
+      public RemoteIterator<Path> doCall(final Path path) throws IOException,
+          UnresolvedLinkException {
+        return new CorruptFileBlockIterator(dfs, path);
+      }
+
+      @Override
+      public RemoteIterator<Path> next(final FileSystem fs, final Path path)
+          throws IOException {
+        return fs.listCorruptFileBlocks(path);
+      }
+    }.resolve(this, absF);
   }
 
   /** @return datanode statistics. */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c32614f4/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 24c359b..9d73776 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2004,6 +2004,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-9142. Separating Configuration object for namenode(s) in
     MiniDFSCluster. (Siqi Li via mingma)
 
+    HDFS-8941. DistributedFileSystem listCorruptFileBlocks API should
+    resolve relative path. (Rakesh R via wang)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c32614f4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
index 99dce1d..0b273df 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
@@ -530,4 +530,72 @@ public class TestListCorruptFileBlocks {
     }
   }
 
+  @Test(timeout = 60000)
+  public void testListCorruptFileBlocksOnRelativePath() throws Exception {
+    Configuration conf = new Configuration();
+    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
+    conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
+
+    MiniDFSCluster cluster = null;
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).build();
+      cluster.waitActive();
+      FileSystem fs = cluster.getFileSystem();
+      DistributedFileSystem dfs = (DistributedFileSystem) fs;
+      final Path baseDir = new Path("/somewhere/base");
+      fs.mkdirs(baseDir);
+      // set working dir
+      fs.setWorkingDirectory(baseDir);
+
+      DFSTestUtil util = new DFSTestUtil.Builder()
+          .setName("testGetCorruptFilesOnRelativePath").setNumFiles(3)
+          .setMaxLevels(1).setMaxSize(1024).build();
+      util.createFiles(fs, "corruptData");
+
+      RemoteIterator<Path> corruptFileBlocks = dfs
+          .listCorruptFileBlocks(new Path("corruptData"));
+      int numCorrupt = countPaths(corruptFileBlocks);
+      assertTrue(numCorrupt == 0);
+
+      // delete the blocks
+      String bpid = cluster.getNamesystem().getBlockPoolId();
+      // For loop through number of data directories per datanode (2)
+      for (int i = 0; i < 2; i++) {
+        File storageDir = cluster.getInstanceStorageDir(0, i);
+        File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
+        List<File> metadataFiles = MiniDFSCluster
+            .getAllBlockMetadataFiles(data_dir);
+        if (metadataFiles == null)
+          continue;
+        for (File metadataFile : metadataFiles) {
+          File blockFile = Block.metaToBlockFile(metadataFile);
+          LOG.info("Deliberately removing file " + blockFile.getName());
+          assertTrue("Cannot remove file.", blockFile.delete());
+          LOG.info("Deliberately removing file " + metadataFile.getName());
+          assertTrue("Cannot remove file.", metadataFile.delete());
+        }
+      }
+
+      int count = 0;
+      corruptFileBlocks = dfs.listCorruptFileBlocks(new Path("corruptData"));
+      numCorrupt = countPaths(corruptFileBlocks);
+      while (numCorrupt < 3) {
+        Thread.sleep(1000);
+        corruptFileBlocks = dfs.listCorruptFileBlocks(new Path("corruptData"));
+        numCorrupt = countPaths(corruptFileBlocks);
+        count++;
+        if (count > 30)
+          break;
+      }
+      // Validate we get all the corrupt files
+      LOG.info("Namenode has bad files. " + numCorrupt);
+      assertTrue("Failed to get corrupt files!", numCorrupt == 3);
+
+      util.cleanup(fs, "corruptData");
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
 }
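
The assertions in the new test rely on a countPaths helper defined elsewhere in
TestListCorruptFileBlocks. A minimal sketch of such a helper, assuming it simply drains the
iterator and counts the entries (sketch only; the actual helper in the test class may differ):

    // Drain a RemoteIterator and return the number of paths it yielded.
    private int countPaths(RemoteIterator<Path> iter) throws IOException {
      int count = 0;
      while (iter.hasNext()) {
        LOG.info("PATH: " + iter.next().toUri().getPath());
        count++;
      }
      return count;
    }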

