Delivered-To: mailing list hdfs-commits@hadoop.apache.org
Mailing-List: contact hdfs-commits-help@hadoop.apache.org; run by ezmlm
Precedence: bulk
Reply-To: hdfs-dev@hadoop.apache.org
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Subject: svn commit: r1044166 - in /hadoop/hdfs/trunk: ./ src/java/org/apache/hadoop/fs/ src/java/org/apache/hadoop/hdfs/ src/java/org/apache/hadoop/hdfs/protocol/ src/java/org/apache/hadoop/hdfs/server/namenode/ src/test/hdfs/org/apache/hadoop/hdfs/protocol/ s...
Date: Thu, 09 Dec 2010 23:36:28 -0000
To: hdfs-commits@hadoop.apache.org
From: hairong@apache.org
X-Mailer: svnmailer-1.0.8
Message-Id: <20101209233628.CEC89238897A@eris.apache.org>

Author: hairong
Date: Thu Dec  9 23:36:21 2010
New Revision: 1044166

URL: http://svn.apache.org/viewvc?rev=1044166&view=rev
Log:
HDFS-1533. A more elegant FileSystem#listCorruptFileBlocks API (HDFS portion). Contributed by Patrick Kling.

Added:
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/CorruptFileBlockIterator.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/CorruptFileBlocks.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/protocol/
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/protocol/TestCorruptFileBlocks.java
Modified:
    hadoop/hdfs/trunk/CHANGES.txt
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/fs/Hdfs.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
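For readers skimming the thread: the substance of HDFS-1533 is an API reshape. A hedged before/after sketch of the client-facing signatures, assembled from nothing beyond what the diffs below show:

    // Before: callers drove cookie-based paging themselves.
    public CorruptFileBlocks listCorruptFileBlocks(String path, String cookie)
        throws IOException;

    // After: paging is hidden behind a RemoteIterator.
    public RemoteIterator<Path> listCorruptFileBlocks(Path path)
        throws IOException;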
Modified: hadoop/hdfs/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/CHANGES.txt?rev=1044166&r1=1044165&r2=1044166&view=diff
==============================================================================
--- hadoop/hdfs/trunk/CHANGES.txt (original)
+++ hadoop/hdfs/trunk/CHANGES.txt Thu Dec  9 23:36:21 2010
@@ -20,6 +20,9 @@ Trunk (unreleased changes)
     HDFS-1506. Refactor fsimage loading code.
     (hairong)
 
+    HDFS-1533. A more elegant FileSystem#listCorruptFileBlocks API
+    (HDFS portion) (Patrick Kling via hairong)
+
   OPTIMIZATIONS
 
     HDFS-1458. Improve checkpoint performance by avoiding unnecessary image

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/fs/Hdfs.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/fs/Hdfs.java?rev=1044166&r1=1044165&r2=1044166&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/fs/Hdfs.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/fs/Hdfs.java Thu Dec  9 23:36:21 2010
@@ -30,6 +30,7 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.CorruptFileBlockIterator;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
@@ -307,10 +308,9 @@ public class Hdfs extends AbstractFileSy
    * {@inheritDoc}
    */
   @Override
-  public CorruptFileBlocks listCorruptFileBlocks(String path,
-                                                 String cookie)
+  public RemoteIterator<Path> listCorruptFileBlocks(Path path)
     throws IOException {
-    return dfs.listCorruptFileBlocks(path, cookie);
+    return new CorruptFileBlockIterator(dfs, path);
   }
 
   @Override

Added: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/CorruptFileBlockIterator.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/CorruptFileBlockIterator.java?rev=1044166&view=auto
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/CorruptFileBlockIterator.java (added)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/CorruptFileBlockIterator.java Thu Dec  9 23:36:21 2010
@@ -0,0 +1,106 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs;
+
+import java.io.IOException;
+import java.util.NoSuchElementException;
+
+import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
+
+/**
+ * Provides an iterator interface for listCorruptFileBlocks.
+ * This class is used by DistributedFileSystem and Hdfs.
+ */
+public class CorruptFileBlockIterator implements RemoteIterator<Path> {
+  private final DFSClient dfs;
+  private String path;
+
+  private String[] files = null;
+  private int fileIdx = 0;
+  private String cookie = null;
+  private Path nextPath = null;
+
+  private int callsMade = 0;
+
+  public CorruptFileBlockIterator(DFSClient dfs, Path path) throws IOException {
+    this.dfs = dfs;
+    this.path = path2String(path);
+    loadNext();
+  }
+
+  /**
+   * @return the number of calls made to the DFSClient.
+   *         This is for debugging and testing purposes.
+   */
+  public int getCallsMade() {
+    return callsMade;
+  }
+
+  private String path2String(Path path) {
+    return path.toUri().getPath();
+  }
+
+  private Path string2Path(String string) {
+    return new Path(string);
+  }
+
+  private void loadNext() throws IOException {
+    if (files == null || fileIdx >= files.length) {
+      CorruptFileBlocks cfb = dfs.listCorruptFileBlocks(path, cookie);
+      files = cfb.getFiles();
+      cookie = cfb.getCookie();
+      fileIdx = 0;
+      callsMade++;
+    }
+
+    if (fileIdx >= files.length) {
+      // received an empty response
+      // there are no more corrupt file blocks
+      nextPath = null;
+    } else {
+      nextPath = string2Path(files[fileIdx]);
+      fileIdx++;
+    }
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public boolean hasNext() {
+    return nextPath != null;
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public Path next() throws IOException {
+    if (!hasNext()) {
+      throw new NoSuchElementException("No more corrupt file blocks");
+    }
+
+    Path result = nextPath;
+    loadNext();
+
+    return result;
+  }
+}
\ No newline at end of file
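A minimal consumption sketch for the new iterator (illustrative only: "fs" is assumed to be a DistributedFileSystem handle and the path is made up; the enclosing method would declare throws IOException, since both hasNext() and next() can propagate it):

    RemoteIterator<Path> it = fs.listCorruptFileBlocks(new Path("/data"));
    while (it.hasNext()) {
      // next() may issue another NameNode call when the current batch
      // of corrupt file paths is exhausted
      Path corrupt = it.next();
      System.out.println("corrupt file: " + corrupt.toUri().getPath());
    }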
Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java?rev=1044166&r1=1044165&r2=1044166&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java Thu Dec  9 23:36:21 2010
@@ -61,12 +61,12 @@ import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnresolvedLinkException;
-import org.apache.hadoop.fs.CorruptFileBlocks;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.DataTransferProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java?rev=1044166&r1=1044165&r2=1044166&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java Thu Dec  9 23:36:21 2010
@@ -43,7 +43,6 @@ import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.fs.RemoteIterator;
-import org.apache.hadoop.fs.CorruptFileBlocks;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -606,10 +605,9 @@ public class DistributedFileSystem exten
    * {@inheritDoc}
    */
   @Override
-  public CorruptFileBlocks listCorruptFileBlocks(String path,
-                                                 String cookie)
+  public RemoteIterator<Path> listCorruptFileBlocks(Path path)
     throws IOException {
-    return dfs.listCorruptFileBlocks(path, cookie);
+    return new CorruptFileBlockIterator(dfs, path);
   }
 
   /** Return statistics for each datanode. */

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java?rev=1044166&r1=1044165&r2=1044166&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java Thu Dec  9 23:36:21 2010
@@ -32,7 +32,6 @@ import org.apache.hadoop.fs.FileAlreadyE
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.UnresolvedLinkException;
-import org.apache.hadoop.fs.CorruptFileBlocks;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;

Added: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/CorruptFileBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/CorruptFileBlocks.java?rev=1044166&view=auto
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/CorruptFileBlocks.java (added)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/CorruptFileBlocks.java Thu Dec  9 23:36:21 2010
@@ -0,0 +1,108 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol;
+
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.Text;
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.Arrays;
+
+/**
+ * Contains a list of paths corresponding to corrupt files and a cookie
+ * used for iterative calls to NameNode.listCorruptFileBlocks.
+ *
+ */
+public class CorruptFileBlocks implements Writable {
+  // used for hashCode
+  private static final int PRIME = 16777619;
+
+  private String[] files;
+  private String cookie;
+
+  public CorruptFileBlocks() {
+    this(new String[0], "");
+  }
+
+  public CorruptFileBlocks(String[] files, String cookie) {
+    this.files = files;
+    this.cookie = cookie;
+  }
+
+  public String[] getFiles() {
+    return files;
+  }
+
+  public String getCookie() {
+    return cookie;
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public void readFields(DataInput in) throws IOException {
+    int fileCount = in.readInt();
+    files = new String[fileCount];
+    for (int i = 0; i < fileCount; i++) {
+      files[i] = Text.readString(in);
+    }
+    cookie = Text.readString(in);
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public void write(DataOutput out) throws IOException {
+    out.writeInt(files.length);
+    for (int i = 0; i < files.length; i++) {
+      Text.writeString(out, files[i]);
+    }
+    Text.writeString(out, cookie);
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  public boolean equals(Object obj) {
+    if (this == obj) {
+      return true;
+    }
+    if (!(obj instanceof CorruptFileBlocks)) {
+      return false;
+    }
+    CorruptFileBlocks other = (CorruptFileBlocks) obj;
+    return cookie.equals(other.cookie) &&
+      Arrays.equals(files, other.files);
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  public int hashCode() {
+    int result = cookie.hashCode();
+
+    for (String file : files) {
+      result = PRIME * result + file.hashCode();
+    }
+
+    return result;
+  }
+}

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1044166&r1=1044165&r2=1044166&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Thu Dec  9 23:36:21 2010
@@ -4832,10 +4832,14 @@ public class FSNamesystem implements FSC
    * @throws IOException
    */
   Collection listCorruptFileBlocks(String path,
-    String startBlockAfter) throws AccessControlException, IOException {
+    String startBlockAfter) throws IOException {
     readLock();
     try {
+      if (isInSafeMode()) {
+        throw new IOException("Cannot run listCorruptFileBlocks because " +
+                              "replication queues have not been initialized.");
+      }
       checkSuperuserPrivilege();
       long startBlockId = 0;
       // print a limited # of corrupt files per call
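One consequence of the new guard in FSNamesystem: a client that calls listCorruptFileBlocks before the NameNode has left safe mode now gets an IOException rather than a possibly incomplete answer. A hedged sketch of caller-side handling (the retry loop and sleep interval are illustrative, not part of the patch; a real caller would distinguish the safe-mode failure from other IOExceptions rather than retrying on all of them):

    RemoteIterator<Path> it = null;
    while (it == null) {
      try {
        it = fs.listCorruptFileBlocks(new Path("/"));
      } catch (IOException e) {
        // NameNode may still be in safe mode with uninitialized
        // replication queues; back off and retry
        Thread.sleep(1000);
      }
    }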
Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=1044166&r1=1044165&r2=1044166&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java Thu Dec  9 23:36:21 2010
@@ -43,7 +43,6 @@ import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.Trash;
 import org.apache.hadoop.fs.UnresolvedLinkException;
-import org.apache.hadoop.fs.CorruptFileBlocks;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -52,6 +51,7 @@ import org.apache.hadoop.hdfs.HdfsConfig
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java?rev=1044166&r1=1044165&r2=1044166&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java Thu Dec  9 23:36:21 2010
@@ -214,8 +214,7 @@ public class NamenodeFsck {
     }
   }
 
-  private void listCorruptFileBlocks() throws AccessControlException,
-    IOException {
+  private void listCorruptFileBlocks() throws IOException {
     Collection corruptFiles = namenode.
       getNamesystem().listCorruptFileBlocks(path, startBlockAfter);
     int numCorruptFiles = corruptFiles.size();
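The batch protocol itself is unchanged and still visible one level down: CorruptFileBlockIterator is essentially a convenience wrapper around the loop below. A hedged sketch against DFSClient (variable and path names illustrative), mirroring loadNext() above: each response carries a cookie that the next call passes back, and an empty batch signals the end of the listing:

    String cookie = null;
    while (true) {
      CorruptFileBlocks cfb = dfsClient.listCorruptFileBlocks("/data", cookie);
      if (cfb.getFiles().length == 0) {
        break;  // empty batch: no more corrupt file blocks
      }
      for (String file : cfb.getFiles()) {
        System.out.println(file);
      }
      cookie = cfb.getCookie();  // resume token for the next call
    }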
Added: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/protocol/TestCorruptFileBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/protocol/TestCorruptFileBlocks.java?rev=1044166&view=auto
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/protocol/TestCorruptFileBlocks.java (added)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/protocol/TestCorruptFileBlocks.java Thu Dec  9 23:36:21 2010
@@ -0,0 +1,79 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.protocol;
+
+import java.io.ByteArrayInputStream;
+import java.io.DataInputStream;
+import java.io.IOException;
+
+import static org.junit.Assert.assertTrue;
+import org.junit.Test;
+
+import org.apache.hadoop.io.DataOutputBuffer;
+
+public class TestCorruptFileBlocks {
+
+  /**
+   * Serialize the cfb given, deserialize and return the result.
+   */
+  static CorruptFileBlocks serializeAndDeserialize(CorruptFileBlocks cfb)
+      throws IOException {
+    DataOutputBuffer buf = new DataOutputBuffer();
+    cfb.write(buf);
+
+    byte[] data = buf.getData();
+    DataInputStream input = new DataInputStream(new ByteArrayInputStream(data));
+
+    CorruptFileBlocks result = new CorruptFileBlocks();
+    result.readFields(input);
+
+    return result;
+  }
+
+  /**
+   * Check whether cfb is unchanged after serialization and deserialization.
+   */
+  static boolean checkSerialize(CorruptFileBlocks cfb)
+      throws IOException {
+    return cfb.equals(serializeAndDeserialize(cfb));
+  }
+
+  /**
+   * Test serialization and deserialization of CorruptFileBlocks.
+   */
+  @Test
+  public void testSerialization() throws IOException {
+    {
+      CorruptFileBlocks cfb = new CorruptFileBlocks();
+      assertTrue("cannot serialize empty CFB", checkSerialize(cfb));
+    }
+
+    {
+      String[] files = new String[0];
+      CorruptFileBlocks cfb = new CorruptFileBlocks(files, "");
+      assertTrue("cannot serialize CFB with empty cookie", checkSerialize(cfb));
+    }
+
+    {
+      String[] files = { "a", "bb", "ccc" };
+      CorruptFileBlocks cfb = new CorruptFileBlocks(files, "test");
+      assertTrue("cannot serialize CFB", checkSerialize(cfb));
+    }
+  }
+}
\ No newline at end of file

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFsck.java?rev=1044166&r1=1044165&r2=1044166&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFsck.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFsck.java Thu Dec  9 23:36:21 2010
@@ -39,13 +39,13 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.CorruptFileBlocks;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.tools.DFSck;
 import org.apache.hadoop.io.IOUtils;

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java?rev=1044166&r1=1044165&r2=1044166&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java Thu Dec  9 23:36:21 2010
@@ -25,13 +25,16 @@ import java.nio.channels.FileChannel;
 import java.util.Collection;
 import java.util.Random;
 
-import junit.framework.TestCase;
+import org.junit.Test;
+import static org.junit.Assert.assertTrue;
 
 import org.apache.commons.logging.Log;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.CorruptFileBlocks;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.hdfs.BlockMissingException;
+import org.apache.hadoop.hdfs.CorruptFileBlockIterator;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -45,10 +48,11 @@ import org.apache.hadoop.hdfs.Distribute
  * with a block # from a previous call and validate that the subsequent
  * blocks/files are also returned.
  */
-public class TestListCorruptFileBlocks extends TestCase {
+public class TestListCorruptFileBlocks {
   static Log LOG = NameNode.stateChangeLog;
 
   /** check if nn.getCorruptFiles() returns a file that has corrupted blocks */
+  @Test
   public void testListCorruptFilesCorruptedBlock() throws Exception {
     MiniDFSCluster cluster = null;
     Random random = new Random();
@@ -119,6 +123,7 @@ public class TestListCorruptFileBlocks e
   }
 
   // deliberately remove blocks from a file and validate the list-corrupt-file-blocks API
+  @Test
   public void testlistCorruptFileBlocks() throws Exception {
     Configuration conf = new Configuration();
     conf.setLong("dfs.blockreport.intervalMsec", 1000);
@@ -212,9 +217,19 @@ public class TestListCorruptFileBlocks e
     }
   }
 
+  private int countPaths(RemoteIterator<Path> iter) throws IOException {
+    int i = 0;
+    while (iter.hasNext()) {
+      LOG.info("PATH: " + iter.next().toUri().getPath());
+      i++;
+    }
+    return i;
+  }
+
   /**
    * test listCorruptFileBlocks in DistributedFileSystem
-   */ 
+   */
+  @Test
   public void testlistCorruptFileBlocksDFS() throws Exception {
     Configuration conf = new Configuration();
     conf.setLong("dfs.blockreport.intervalMsec", 1000);
@@ -232,9 +247,9 @@ public class TestListCorruptFileBlocks e
     util.createFiles(fs, "/corruptData");
 
     final NameNode namenode = cluster.getNameNode();
-    CorruptFileBlocks corruptFileBlocks = 
-      dfs.listCorruptFileBlocks("/corruptData", null);
-    int numCorrupt = corruptFileBlocks.getFiles().length;
+    RemoteIterator<Path> corruptFileBlocks = 
+      dfs.listCorruptFileBlocks(new Path("/corruptData"));
+    int numCorrupt = countPaths(corruptFileBlocks);
     assertTrue(numCorrupt == 0);
     // delete the blocks
     File baseDir = new File(System.getProperty("test.build.data",
@@ -258,12 +273,12 @@ public class TestListCorruptFileBlocks e
     }
 
     int count = 0;
-    corruptFileBlocks = dfs.listCorruptFileBlocks("/corruptData", null);
-    numCorrupt = corruptFileBlocks.getFiles().length;
+    corruptFileBlocks = dfs.listCorruptFileBlocks(new Path("/corruptData"));
+    numCorrupt = countPaths(corruptFileBlocks);
     while (numCorrupt < 3) {
       Thread.sleep(1000);
-      corruptFileBlocks = dfs.listCorruptFileBlocks("/corruptData", null);
-      numCorrupt = corruptFileBlocks.getFiles().length;
+      corruptFileBlocks = dfs.listCorruptFileBlocks(new Path("/corruptData"));
+      numCorrupt = countPaths(corruptFileBlocks);
       count++;
       if (count > 30)
         break;
@@ -281,7 +296,12 @@ public class TestListCorruptFileBlocks e
     }
   }
 
-  /** check if NN.listCorruptFiles() returns the right limit */
+  /**
+   * Test if NN.listCorruptFiles() returns the right number of results.
+   * Also, test that DFS.listCorruptFileBlocks can make multiple successive
+   * calls.
+   */
+  @Test
   public void testMaxCorruptFiles() throws Exception {
     MiniDFSCluster cluster = null;
     try {
@@ -338,6 +358,17 @@ public class TestListCorruptFileBlocks e
       assertTrue("Namenode has " + badFiles.size() + " bad files. Expecting " +
           maxCorruptFileBlocks + ".",
           badFiles.size() == maxCorruptFileBlocks);
+
+      CorruptFileBlockIterator iter = (CorruptFileBlockIterator)
+        fs.listCorruptFileBlocks(new Path("/srcdat2"));
+      int corruptPaths = countPaths(iter);
+      assertTrue("Expected more than " + maxCorruptFileBlocks +
+                 " corrupt file blocks but got " + corruptPaths,
+                 corruptPaths > maxCorruptFileBlocks);
+      assertTrue("Iterator should have made more than 1 call but made " +
+                 iter.getCallsMade(),
+                 iter.getCallsMade() > 1);
+
       util.cleanup(fs, "/srcdat2");
     } finally {
       if (cluster != null) { cluster.shutdown(); }