hadoop-common-commits mailing list archives

From a..@apache.org
Subject [1/4] HDFS-6482. Use block ID-based block layout on datanodes (James Thomas via Colin Patrick McCabe)
Date Tue, 09 Sep 2014 05:14:09 GMT
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 176b07b48 -> e73601860
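
[Editor's note] For context: HDFS-6482 moves datanodes from round-robin placement of finalized replicas to a directory layout derived from the block ID, so a replica's location can be computed directly from its ID instead of being tracked per directory. The following is only an illustrative sketch of such an ID-based mapping; the class name, the "subdir" prefix, and the shift/mask choices are assumptions for illustration, not necessarily the exact helper added by this patch.

    import java.io.File;

    public final class BlockIdLayoutSketch {
      private static final String SUBDIR_PREFIX = "subdir";

      // Map a block ID to a two-level subdirectory under the finalized dir,
      // e.g. finalized/subdir<d1>/subdir<d2>.
      static File idToBlockDir(File finalizedDir, long blockId) {
        int d1 = (int) ((blockId >> 16) & 0xFF);
        int d2 = (int) ((blockId >> 8) & 0xFF);
        return new File(finalizedDir,
            SUBDIR_PREFIX + d1 + File.separator + SUBDIR_PREFIX + d2);
      }

      public static void main(String[] args) {
        // Prints the finalized subdirectory computed for the given block ID.
        System.out.println(
            idToBlockDir(new File("/data/current/finalized"), 1073758208L));
      }
    }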


http://git-wip-us.apache.org/repos/asf/hadoop/blob/da118bb1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java
index d16a4bb..755d499 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java
@@ -103,9 +103,10 @@ public class TestDeleteBlockPool {
       fs1.delete(new Path("/alpha"), true);
       
       // Wait till all blocks are deleted from the dn2 for bpid1.
-      while ((MiniDFSCluster.getFinalizedDir(dn2StorageDir1, 
-          bpid1).list().length != 0) || (MiniDFSCluster.getFinalizedDir(
-              dn2StorageDir2, bpid1).list().length != 0)) {
+      File finalDir1 = MiniDFSCluster.getFinalizedDir(dn2StorageDir1, bpid1);
+      File finalDir2 = MiniDFSCluster.getFinalizedDir(dn2StorageDir2, bpid1);
+      while ((!DatanodeUtil.dirNoFilesRecursive(finalDir1)) ||
+          (!DatanodeUtil.dirNoFilesRecursive(finalDir2))) {
         try {
           Thread.sleep(3000);
         } catch (Exception ignored) {
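
[Editor's note] The hunk above replaces a flat list().length check with DatanodeUtil.dirNoFilesRecursive: under the ID-based layout, finalized replicas live in nested subdirectories, so an empty top-level listing no longer proves the block pool is empty. A minimal sketch of what such a recursive "no regular files" check could look like follows; the exact behavior of the real helper (for example how it reports an unreadable directory) is an assumption here.

    import java.io.File;
    import java.io.IOException;

    final class DirCheckSketch {
      /** Returns true if dir contains no regular files at any depth. */
      static boolean dirNoFilesRecursive(File dir) throws IOException {
        File[] contents = dir.listFiles();
        if (contents == null) {
          throw new IOException("Cannot list contents of " + dir);
        }
        for (File f : contents) {
          if (!f.isDirectory() || !dirNoFilesRecursive(f)) {
            return false;  // found a file, or a subtree that contains one
          }
        }
        return true;
      }
    }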

http://git-wip-us.apache.org/repos/asf/hadoop/blob/da118bb1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
index 61fa431..4cddd60 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
@@ -41,6 +41,7 @@ import java.net.InetSocketAddress;
 import java.nio.channels.FileChannel;
 import java.security.PrivilegedExceptionAction;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 import java.util.Random;
 import java.util.Set;
@@ -63,6 +64,7 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -750,15 +752,14 @@ public class TestFsck {
         for (int j=0; j<=1; j++) {
           File storageDir = cluster.getInstanceStorageDir(i, j);
           File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
-          File[] blocks = data_dir.listFiles();
-          if (blocks == null)
+          List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(
+              data_dir);
+          if (metadataFiles == null)
             continue;
-  
-          for (int idx = 0; idx < blocks.length; idx++) {
-            if (!blocks[idx].getName().startsWith("blk_")) {
-              continue;
-            }
-            assertTrue("Cannot remove file.", blocks[idx].delete());
+          for (File metadataFile : metadataFiles) {
+            File blockFile = Block.metaToBlockFile(metadataFile);
+            assertTrue("Cannot remove file.", blockFile.delete());
+            assertTrue("Cannot remove file.", metadataFile.delete());
           }
         }
       }
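
[Editor's note] In the TestFsck hunk above, each metadata file returned by getAllBlockMetadataFiles is mapped back to its data file via Block.metaToBlockFile, which relies on the naming convention blk_<id>_<genstamp>.meta sitting next to blk_<id>. A minimal illustrative sketch of that mapping is below; it is not the Hadoop implementation, just the name manipulation it implies.

    import java.io.File;

    final class MetaNameSketch {
      /** blk_<id>_<genstamp>.meta -> blk_<id> in the same directory. */
      static File metaToBlockFile(File metaFile) {
        String name = metaFile.getName();                        // e.g. blk_42_1001.meta
        String base = name.substring(0, name.lastIndexOf('_'));  // blk_42
        return new File(metaFile.getParentFile(), base);
      }

      public static void main(String[] args) {
        System.out.println(metaToBlockFile(new File("/tmp/blk_42_1001.meta")));
      }
    }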

http://git-wip-us.apache.org/repos/asf/hadoop/blob/da118bb1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
index 18f83ef..7e36acb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
@@ -25,6 +25,7 @@ import java.io.RandomAccessFile;
 import java.nio.ByteBuffer;
 import java.nio.channels.FileChannel;
 import java.util.Collection;
+import java.util.List;
 import java.util.Random;
 
 import org.apache.commons.logging.Log;
@@ -39,7 +40,11 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.TestFileCorruption;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.Test;
 
@@ -87,36 +92,29 @@ public class TestListCorruptFileBlocks {
       File storageDir = cluster.getInstanceStorageDir(0, 1);
       File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
       assertTrue("data directory does not exist", data_dir.exists());
-      File[] blocks = data_dir.listFiles();
-      assertTrue("Blocks do not exist in data-dir", (blocks != null) && (blocks.length > 0));
-      for (int idx = 0; idx < blocks.length; idx++) {
-        if (blocks[idx].getName().startsWith("blk_") &&
-            blocks[idx].getName().endsWith(".meta")) {
-          //
-          // shorten .meta file
-          //
-          RandomAccessFile file = new RandomAccessFile(blocks[idx], "rw");
-          FileChannel channel = file.getChannel();
-          long position = channel.size() - 2;
-          int length = 2;
-          byte[] buffer = new byte[length];
-          random.nextBytes(buffer);
-          channel.write(ByteBuffer.wrap(buffer), position);
-          file.close();
-          LOG.info("Deliberately corrupting file " + blocks[idx].getName() +
-              " at offset " + position + " length " + length);
-
-          // read all files to trigger detection of corrupted replica
-          try {
-            util.checkFiles(fs, "/srcdat10");
-          } catch (BlockMissingException e) {
-            System.out.println("Received BlockMissingException as expected.");
-          } catch (IOException e) {
-            assertTrue("Corrupted replicas not handled properly. Expecting BlockMissingException " +
-                " but received IOException " + e, false);
-          }
-          break;
-        }
+      List<File> metaFiles = MiniDFSCluster.getAllBlockMetadataFiles(data_dir);
+      assertTrue("Data directory does not contain any blocks or there was an "
+          + "IO error", metaFiles != null && !metaFiles.isEmpty());
+      File metaFile = metaFiles.get(0);
+      RandomAccessFile file = new RandomAccessFile(metaFile, "rw");
+      FileChannel channel = file.getChannel();
+      long position = channel.size() - 2;
+      int length = 2;
+      byte[] buffer = new byte[length];
+      random.nextBytes(buffer);
+      channel.write(ByteBuffer.wrap(buffer), position);
+      file.close();
+      LOG.info("Deliberately corrupting file " + metaFile.getName() +
+          " at offset " + position + " length " + length);
+
+      // read all files to trigger detection of corrupted replica
+      try {
+        util.checkFiles(fs, "/srcdat10");
+      } catch (BlockMissingException e) {
+        System.out.println("Received BlockMissingException as expected.");
+      } catch (IOException e) {
+        assertTrue("Corrupted replicas not handled properly. Expecting BlockMissingException " +
+            " but received IOException " + e, false);
       }
 
       // fetch bad file list from namenode. There should be one file.
@@ -174,38 +172,30 @@ public class TestListCorruptFileBlocks {
       File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, 
           cluster.getNamesystem().getBlockPoolId());
       assertTrue("data directory does not exist", data_dir.exists());
-      File[] blocks = data_dir.listFiles();
-      assertTrue("Blocks do not exist in data-dir", (blocks != null) &&
-                 (blocks.length > 0));
-      for (int idx = 0; idx < blocks.length; idx++) {
-        if (blocks[idx].getName().startsWith("blk_") &&
-            blocks[idx].getName().endsWith(".meta")) {
-          //
-          // shorten .meta file
-          //
-          RandomAccessFile file = new RandomAccessFile(blocks[idx], "rw");
-          FileChannel channel = file.getChannel();
-          long position = channel.size() - 2;
-          int length = 2;
-          byte[] buffer = new byte[length];
-          random.nextBytes(buffer);
-          channel.write(ByteBuffer.wrap(buffer), position);
-          file.close();
-          LOG.info("Deliberately corrupting file " + blocks[idx].getName() +
-              " at offset " + position + " length " + length);
-
-          // read all files to trigger detection of corrupted replica
-          try {
-            util.checkFiles(fs, "/srcdat10");
-          } catch (BlockMissingException e) {
-            System.out.println("Received BlockMissingException as expected.");
-          } catch (IOException e) {
-            assertTrue("Corrupted replicas not handled properly. " +
-                       "Expecting BlockMissingException " +
-                       " but received IOException " + e, false);
-          }
-          break;
-        }
+      List<File> metaFiles = MiniDFSCluster.getAllBlockMetadataFiles(data_dir);
+      assertTrue("Data directory does not contain any blocks or there was an "
+          + "IO error", metaFiles != null && !metaFiles.isEmpty());
+      File metaFile = metaFiles.get(0);
+      RandomAccessFile file = new RandomAccessFile(metaFile, "rw");
+      FileChannel channel = file.getChannel();
+      long position = channel.size() - 2;
+      int length = 2;
+      byte[] buffer = new byte[length];
+      random.nextBytes(buffer);
+      channel.write(ByteBuffer.wrap(buffer), position);
+      file.close();
+      LOG.info("Deliberately corrupting file " + metaFile.getName() +
+          " at offset " + position + " length " + length);
+
+      // read all files to trigger detection of corrupted replica
+      try {
+        util.checkFiles(fs, "/srcdat10");
+      } catch (BlockMissingException e) {
+        System.out.println("Received BlockMissingException as expected.");
+      } catch (IOException e) {
+        assertTrue("Corrupted replicas not handled properly. " +
+                   "Expecting BlockMissingException " +
+                   " but received IOException " + e, false);
       }
 
       // fetch bad file list from namenode. There should be one file.
@@ -295,17 +285,18 @@ public class TestListCorruptFileBlocks {
         for (int j = 0; j <= 1; j++) {
           File storageDir = cluster.getInstanceStorageDir(i, j);
           File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
-          File[] blocks = data_dir.listFiles();
-          if (blocks == null)
+          List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(
+              data_dir);
+          if (metadataFiles == null)
             continue;
           // assertTrue("Blocks do not exist in data-dir", (blocks != null) &&
           // (blocks.length > 0));
-          for (int idx = 0; idx < blocks.length; idx++) {
-            if (!blocks[idx].getName().startsWith("blk_")) {
-              continue;
-            }
-            LOG.info("Deliberately removing file " + blocks[idx].getName());
-            assertTrue("Cannot remove file.", blocks[idx].delete());
+          for (File metadataFile : metadataFiles) {
+            File blockFile = Block.metaToBlockFile(metadataFile);
+            LOG.info("Deliberately removing file " + blockFile.getName());
+            assertTrue("Cannot remove file.", blockFile.delete());
+            LOG.info("Deliberately removing file " + metadataFile.getName());
+            assertTrue("Cannot remove file.", metadataFile.delete());
             // break;
           }
         }
@@ -405,17 +396,18 @@ public class TestListCorruptFileBlocks {
       for (int i = 0; i < 2; i++) {
         File storageDir = cluster.getInstanceStorageDir(0, i);
         File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
-        File[] blocks = data_dir.listFiles();
-        if (blocks == null)
+        List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(
+            data_dir);
+        if (metadataFiles == null)
           continue;
         // assertTrue("Blocks do not exist in data-dir", (blocks != null) &&
         // (blocks.length > 0));
-        for (int idx = 0; idx < blocks.length; idx++) {
-          if (!blocks[idx].getName().startsWith("blk_")) {
-            continue;
-          }
-          LOG.info("Deliberately removing file " + blocks[idx].getName());
-          assertTrue("Cannot remove file.", blocks[idx].delete());
+        for (File metadataFile : metadataFiles) {
+          File blockFile = Block.metaToBlockFile(metadataFile);
+          LOG.info("Deliberately removing file " + blockFile.getName());
+          assertTrue("Cannot remove file.", blockFile.delete());
+          LOG.info("Deliberately removing file " + metadataFile.getName());
+          assertTrue("Cannot remove file.", metadataFile.delete());
           // break;
         }
       }
@@ -482,15 +474,14 @@ public class TestListCorruptFileBlocks {
           File storageDir = cluster.getInstanceStorageDir(i, j);
           File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
           LOG.info("Removing files from " + data_dir);
-          File[] blocks = data_dir.listFiles();
-          if (blocks == null)
+          List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(
+              data_dir);
+          if (metadataFiles == null)
             continue;
-  
-          for (int idx = 0; idx < blocks.length; idx++) {
-            if (!blocks[idx].getName().startsWith("blk_")) {
-              continue;
-            }
-            assertTrue("Cannot remove file.", blocks[idx].delete());
+          for (File metadataFile : metadataFiles) {
+            File blockFile = Block.metaToBlockFile(metadataFile);
+            assertTrue("Cannot remove file.", blockFile.delete());
+            assertTrue("Cannot remove file.", metadataFile.delete());
           }
         }
       }
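
[Editor's note] The repeated pattern in these TestListCorruptFileBlocks hunks collects every block metadata file under a finalized directory instead of listing one flat directory. A sketch of such a recursive collection is below, assuming the helper simply walks the tree for blk_*.meta names and returns null when the directory cannot be listed; the real MiniDFSCluster.getAllBlockMetadataFiles may differ in details such as error handling.

    import java.io.File;
    import java.util.ArrayList;
    import java.util.List;

    final class MetaScanSketch {
      /** Collect every blk_*.meta file under dir, or null if dir is unreadable. */
      static List<File> getAllBlockMetadataFiles(File dir) {
        File[] contents = dir.listFiles();
        if (contents == null) {
          return null;
        }
        List<File> result = new ArrayList<>();
        for (File f : contents) {
          if (f.isDirectory()) {
            List<File> sub = getAllBlockMetadataFiles(f);
            if (sub != null) {
              result.addAll(sub);
            }
          } else if (f.getName().startsWith("blk_") && f.getName().endsWith(".meta")) {
            result.add(f);
          }
        }
        return result;
      }
    }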

http://git-wip-us.apache.org/repos/asf/hadoop/blob/da118bb1/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-24-datanode-dir.tgz
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-24-datanode-dir.tgz b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-24-datanode-dir.tgz
new file mode 100644
index 0000000..9f666fe
Binary files /dev/null and b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-24-datanode-dir.tgz differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/da118bb1/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-datanode-dir.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-datanode-dir.txt b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-datanode-dir.txt
new file mode 100644
index 0000000..e5890cc
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-datanode-dir.txt
@@ -0,0 +1,23 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Similar to hadoop-dfs-dir.txt, except this is used for a datanode layout
+# upgrade test.
+# Uncomment the following line to produce checksum info for a new DFS image.
+#printChecksums
+
+/small 	 2976363016
+overallCRC 	 4099869518
+	

