From: arp@apache.org
To: common-commits@hadoop.apache.org
Message-Id: <6587bd380ef54e65ae3669890fe573cc@git.apache.org>
Subject: git commit: HDFS-7143. Fix findbugs warnings in HDFS-6581 branch
Date: Thu, 25 Sep 2014 03:13:41 +0000 (UTC)

Repository: hadoop
Updated Branches:
  refs/heads/HDFS-6581 b1000fbba -> 3f9255f21


HDFS-7143. Fix findbugs warnings in HDFS-6581 branch


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3f9255f2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3f9255f2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3f9255f2

Branch: refs/heads/HDFS-6581
Commit: 3f9255f2128c9e70ce63764b33e7b4ccbd815231
Parents: b1000fb
Author: arp
Authored: Wed Sep 24 20:13:30 2014 -0700
Committer: arp
Committed: Wed Sep 24 20:13:30 2014 -0700

----------------------------------------------------------------------
 .../hadoop-hdfs/CHANGES-HDFS-6581.txt           |  2 +
 .../datanode/fsdataset/impl/BlockPoolSlice.java | 50 +++++++++++++++-----
 2 files changed, 40 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3f9255f2/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-6581.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-6581.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-6581.txt
index e046421..6eb8cec 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-6581.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-6581.txt
@@ -71,4 +71,6 @@
     HDFS-6990. Add unit test for evict/delete RAM_DISK block with open
     handle. (Xiaoyu Yao via Arpit Agarwal)
 
+    HDFS-7143. Fix findbugs warnings in HDFS-6581 branch. (szetszwo via
+    Arpit Agarwal)
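For readers skimming the patch: judging by the fixes below, the findbugs
warnings appear to be the ignored-return-value kind. java.io.File#renameTo
and File#delete report failure only through a boolean result, so a call
site that discards the result lets a failed rename or delete pass silently.
A minimal standalone sketch of the problem and the checked alternative (the
class and file names are invented for the demo; this is not the Hadoop code
itself):

    import java.io.File;
    import java.io.IOException;

    public class RenameCheckDemo {
      public static void main(String[] args) throws IOException {
        File src = File.createTempFile("demo", ".tmp");
        File dst = new File(src.getParentFile(), "demo-renamed.tmp");

        // What findbugs flags: renameTo() signals failure only through
        // its return value, so discarding the result hides errors.
        // src.renameTo(dst);

        // Checked alternative: surface the failure explicitly.
        if (!src.renameTo(dst)) {
          throw new IOException("Failed to rename " + src + " to " + dst);
        }
        if (!dst.delete()) {
          System.err.println("Failed to delete " + dst);
        }
      }
    }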

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3f9255f2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
index 2ee16f6..3eeb3ef 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
@@ -29,6 +29,8 @@ import java.io.RandomAccessFile;
 import java.util.Scanner;
 
 import org.apache.commons.io.FileUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.DU;
 import org.apache.hadoop.fs.FileUtil;
@@ -43,6 +45,7 @@ import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaWaitingToBeRecovered;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.DiskChecker;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
@@ -57,6 +60,8 @@ import org.apache.hadoop.util.Time;
  * This class is synchronized by {@link FsVolumeImpl}.
  */
 class BlockPoolSlice {
+  static final Log LOG = LogFactory.getLog(BlockPoolSlice.class);
+
   private final String bpid;
   private final FsVolumeImpl volume; // volume to which this BlockPool belongs to
   private final File currentDir; // StorageDirectory/current/bpid/current
@@ -369,22 +374,36 @@
       File targetDir = DatanodeUtil.idToBlockDir(finalizedDir, blockId);
 
       if (blockFile.exists()) {
-        File targetBlockFile = new File(targetDir, blockFile.getName());
-        File targetMetaFile = new File(targetDir, metaFile.getName());
-
         if (!targetDir.exists() && !targetDir.mkdirs()) {
-          FsDatasetImpl.LOG.warn("Failed to move " + blockFile + " to " + targetDir);
+          LOG.warn("Failed to mkdirs " + targetDir);
+          continue;
+        }
+
+        final File targetMetaFile = new File(targetDir, metaFile.getName());
+        try {
+          NativeIO.renameTo(metaFile, targetMetaFile);
+        } catch (IOException e) {
+          LOG.warn("Failed to move meta file from "
+              + metaFile + " to " + targetMetaFile, e);
           continue;
         }
 
-        metaFile.renameTo(targetMetaFile);
-        blockFile.renameTo(targetBlockFile);
+        final File targetBlockFile = new File(targetDir, blockFile.getName());
+        try {
+          NativeIO.renameTo(blockFile, targetBlockFile);
+        } catch (IOException e) {
+          LOG.warn("Failed to move block file from "
+              + blockFile + " to " + targetBlockFile, e);
+          continue;
+        }
 
         if (targetBlockFile.exists() && targetMetaFile.exists()) {
           ++numRecovered;
         } else {
           // Failure should be rare.
-          FsDatasetImpl.LOG.warn("Failed to move " + blockFile + " to " + targetDir);
+          LOG.warn("Failed to move " + blockFile + " to " + targetDir);
         }
       }
     }
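The hunk above swaps File#renameTo, which can only return false on failure,
for NativeIO#renameTo, which throws an IOException describing the failure.
A sketch of that pattern factored into a helper, assuming hadoop-common is
on the classpath; the RenameUtil class and tryRename method are invented
for illustration, only NativeIO.renameTo itself comes from the patch:

    import java.io.File;
    import java.io.IOException;

    import org.apache.hadoop.io.nativeio.NativeIO;

    // Illustrative helper only; mirrors the patch's try/catch-and-continue
    // pattern around NativeIO.renameTo.
    public final class RenameUtil {
      private RenameUtil() {}

      // Returns true on success; reports the cause and returns false on
      // failure, so the caller can skip the entry (as the patched loop
      // does with "continue").
      public static boolean tryRename(File src, File dst) {
        try {
          NativeIO.renameTo(src, dst);
          return true;
        } catch (IOException e) {
          System.err.println("Failed to move " + src + " to " + dst + ": " + e);
          return false;
        }
      }
    }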
@@ -538,16 +557,23 @@
       replicaToDelete = (replicaToKeep == replica1) ? replica2 : replica1;
 
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("resolveDuplicateReplicas decide to keep " + replicaToKeep + ". "
+            + "Will try to delete " + replicaToDelete);
+      }
+
       // Update volumeMap.
       volumeMap.add(bpid, replicaToKeep);
 
       // Delete the files on disk. Failure here is okay.
-      replicaToDelete.getBlockFile().delete();
-      replicaToDelete.getMetaFile().delete();
-
-      FsDatasetImpl.LOG.info(
-          "resolveDuplicateReplicas keeping " + replicaToKeep.getBlockFile() +
-          ", deleting " + replicaToDelete.getBlockFile());
+      final File blockFile = replicaToDelete.getBlockFile();
+      if (!blockFile.delete()) {
+        LOG.warn("Failed to delete block file " + blockFile);
+      }
+      final File metaFile = replicaToDelete.getMetaFile();
+      if (!metaFile.delete()) {
+        LOG.warn("Failed to delete meta file " + metaFile);
+      }
 
       return replicaToKeep;
     }
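The last hunk applies the same discipline to File#delete, checking the
boolean result instead of dropping it, and guards the debug message with
LOG.isDebugEnabled() so the string concatenation is skipped unless debug
logging is on. A self-contained sketch of both idioms, assuming only
commons-logging is available (class and file names are placeholders):

    import java.io.File;
    import java.io.IOException;

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;

    public class DeleteCheckDemo {
      static final Log LOG = LogFactory.getLog(DeleteCheckDemo.class);

      public static void main(String[] args) throws IOException {
        File f = File.createTempFile("demo", ".tmp");

        // Guard the message construction; the concatenation only runs
        // when debug logging is actually enabled.
        if (LOG.isDebugEnabled()) {
          LOG.debug("Will try to delete " + f);
        }

        // Check delete()'s result instead of discarding it; the discarded
        // return value is exactly what findbugs warns about.
        if (!f.delete()) {
          LOG.warn("Failed to delete file " + f);
        }
      }
    }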