From: arp@apache.org
To: common-commits@hadoop.apache.org
Reply-To: common-dev@hadoop.apache.org
Date: Fri, 26 Jun 2015 21:39:07 -0000
Subject: [12/19] hadoop git commit: HDFS-8546. Use try with resources in DataStorage and Storage.

HDFS-8546. Use try with resources in DataStorage and Storage.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1403b84b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1403b84b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1403b84b

Branch: refs/heads/HDFS-7240
Commit: 1403b84b122fb76ef2b085a728b5402c32499c1f
Parents: ff0e5e5
Author: Andrew Wang
Authored: Thu Jun 25 17:50:32 2015 -0700
Committer: Andrew Wang
Committed: Thu Jun 25 17:50:32 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 +
 .../hadoop/hdfs/server/common/Storage.java  | 24 ++++-------
 .../hdfs/server/datanode/DataStorage.java   | 45 ++++++--------
 3 files changed, 25 insertions(+), 46 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1403b84b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index dcedb9f..7b97f41 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -674,6 +674,8 @@ Release 2.8.0 - UNRELEASED
 
     HDFS-8665. Fix replication check in DFSTestUtils#waitForReplication. (wang)
 
+    HDFS-8546. Use try with resources in DataStorage and Storage. (wang)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
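For context on the pattern this commit adopts throughout: Java 7's
try-with-resources closes any resource declared in the try header
automatically, whether the block exits normally or by exception, replacing
hand-written finally cleanup. A minimal standalone sketch, not part of the
patch (the file name and values are illustrative):

import java.io.IOException;
import java.io.RandomAccessFile;

public class TryWithResourcesSketch {
  public static void main(String[] args) throws IOException {
    // Before (manual cleanup): close() lives in a finally block,
    // which is verbose and easy to get wrong.
    RandomAccessFile raf = new RandomAccessFile("example.tmp", "rws");
    try {
      raf.writeInt(42);
    } finally {
      raf.close();
    }

    // After: the resource is declared in the try header and closed
    // automatically when the block exits, normally or exceptionally.
    try (RandomAccessFile file = new RandomAccessFile("example.tmp", "rws")) {
      file.seek(0);
      System.out.println(file.readInt()); // prints 42
    }
  }
}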
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1403b84b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
index e6f0999..c630206 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
@@ -709,6 +709,7 @@ public abstract class Storage extends StorageInfo {
     try {
       res = file.getChannel().tryLock();
       if (null == res) {
+        LOG.error("Unable to acquire file lock on path " + lockF.toString());
         throw new OverlappingFileLockException();
       }
       file.write(jvmName.getBytes(Charsets.UTF_8));
@@ -972,35 +973,28 @@ public abstract class Storage extends StorageInfo {
   public void writeProperties(File to, StorageDirectory sd)
       throws IOException {
     Properties props = new Properties();
     setPropertiesFromFields(props, sd);
-    writeProperties(to, sd, props);
+    writeProperties(to, props);
   }
 
-  public static void writeProperties(File to, StorageDirectory sd,
-      Properties props) throws IOException {
-    RandomAccessFile file = new RandomAccessFile(to, "rws");
-    FileOutputStream out = null;
-    try {
+  public static void writeProperties(File to, Properties props)
+      throws IOException {
+    try (RandomAccessFile file = new RandomAccessFile(to, "rws");
+        FileOutputStream out = new FileOutputStream(file.getFD())) {
       file.seek(0);
-      out = new FileOutputStream(file.getFD());
       /*
-       * If server is interrupted before this line, 
+       * If server is interrupted before this line,
        * the version file will remain unchanged.
        */
       props.store(out, null);
       /*
-       * Now the new fields are flushed to the head of the file, but file 
-       * length can still be larger then required and therefore the file can 
+       * Now the new fields are flushed to the head of the file, but file
+       * length can still be larger then required and therefore the file can
        * contain whole or corrupted fields from its old contents in the end.
        * If server is interrupted here and restarted later these extra fields
        * either should not effect server behavior or should be handled
       * by the server correctly.
        */
       file.setLength(out.getChannel().position());
-    } finally {
-      if (out != null) {
-        out.close();
-      }
-      file.close();
     }
   }
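A note on the writeProperties change above: when several resources share one
try header, Java closes them in reverse declaration order, so the
FileOutputStream wrapping file.getFD() is closed before the RandomAccessFile,
which mirrors the ordering of the removed finally block. A small sketch of
that ordering, not from the patch (the file name is hypothetical):

import java.io.FileOutputStream;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.charset.StandardCharsets;

public class CloseOrderSketch {
  public static void main(String[] args) throws IOException {
    try (RandomAccessFile file = new RandomAccessFile("props.tmp", "rws");
         FileOutputStream out = new FileOutputStream(file.getFD())) {
      out.write("key=value\n".getBytes(StandardCharsets.UTF_8));
      // On exit, out.close() runs first, then file.close(),
      // in reverse order of declaration.
    }
  }
}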
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1403b84b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
index 0bd08dd..76789f9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
@@ -44,17 +44,15 @@ import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
-import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.DiskChecker;
 
 import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.RandomAccessFile;
 import java.nio.channels.FileLock;
+import java.nio.channels.OverlappingFileLockException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
@@ -82,7 +80,6 @@ import java.util.concurrent.Future;
 public class DataStorage extends Storage {
 
   public final static String BLOCK_SUBDIR_PREFIX = "subdir";
-  final static String COPY_FILE_PREFIX = "dncp_";
   final static String STORAGE_DIR_DETACHED = "detach";
   public final static String STORAGE_DIR_RBW = "rbw";
   public final static String STORAGE_DIR_FINALIZED = "finalized";
@@ -614,20 +611,22 @@ public class DataStorage extends Storage {
   @Override
   public boolean isPreUpgradableLayout(StorageDirectory sd) throws IOException {
     File oldF = new File(sd.getRoot(), "storage");
-    if (!oldF.exists())
+    if (!oldF.exists()) {
       return false;
+    }
     // check the layout version inside the storage file
     // Lock and Read old storage file
-    RandomAccessFile oldFile = new RandomAccessFile(oldF, "rws");
-    FileLock oldLock = oldFile.getChannel().tryLock();
-    try {
+    try (RandomAccessFile oldFile = new RandomAccessFile(oldF, "rws");
+        FileLock oldLock = oldFile.getChannel().tryLock()) {
+      if (null == oldLock) {
+        LOG.error("Unable to acquire file lock on path " + oldF.toString());
+        throw new OverlappingFileLockException();
+      }
       oldFile.seek(0);
       int oldVersion = oldFile.readInt();
-      if (oldVersion < LAST_PRE_UPGRADE_LAYOUT_VERSION)
+      if (oldVersion < LAST_PRE_UPGRADE_LAYOUT_VERSION) {
         return false;
-    } finally {
-      oldLock.release();
-      oldFile.close();
+      }
     }
     return true;
   }
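The isPreUpgradableLayout rewrite above works because FileLock has
implemented AutoCloseable since Java 7, and its close() releases the lock.
One wrinkle: tryLock() can return null when another process already holds
the lock, and try-with-resources simply skips close() for a null resource,
which is why the new code must null-check and throw explicitly. A minimal
sketch, not from the patch (the lock file name is illustrative):

import java.io.RandomAccessFile;
import java.nio.channels.FileLock;
import java.nio.channels.OverlappingFileLockException;

public class FileLockSketch {
  public static void main(String[] args) throws Exception {
    try (RandomAccessFile file = new RandomAccessFile("lock.tmp", "rws");
         FileLock lock = file.getChannel().tryLock()) {
      // tryLock() returned null: the lock is held elsewhere, and
      // try-with-resources would silently skip close() on null.
      if (lock == null) {
        throw new OverlappingFileLockException();
      }
      // ... read or write the protected file here ...
    } // lock.close() releases the lock, then the file is closed
  }
}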
@@ -1218,23 +1217,8 @@ public class DataStorage extends Storage {
       return;
     }
     if (!from.isDirectory()) {
-      if (from.getName().startsWith(COPY_FILE_PREFIX)) {
-        FileInputStream in = new FileInputStream(from);
-        try {
-          FileOutputStream out = new FileOutputStream(to);
-          try {
-            IOUtils.copyBytes(in, out, 16*1024);
-            hl.linkStats.countPhysicalFileCopies++;
-          } finally {
-            out.close();
-          }
-        } finally {
-          in.close();
-        }
-      } else {
-        HardLink.createHardLink(from, to);
-        hl.linkStats.countSingleLinks++;
-      }
+      HardLink.createHardLink(from, to);
+      hl.linkStats.countSingleLinks++;
      return;
    }
    // from is a directory
@@ -1285,8 +1269,7 @@ public class DataStorage extends Storage {
     String[] otherNames = from.list(new java.io.FilenameFilter() {
       @Override
       public boolean accept(File dir, String name) {
-        return name.startsWith(BLOCK_SUBDIR_PREFIX)
-          || name.startsWith(COPY_FILE_PREFIX);
+        return name.startsWith(BLOCK_SUBDIR_PREFIX);
       }
     });
     for(int i = 0; i < otherNames.length; i++)
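One more property the conversion buys, worth noting for reviewers: if both
the try body and a close() fail, try-with-resources keeps the body's
exception as the primary and attaches the close() failure via
addSuppressed(), whereas a close() failure in the removed finally-based
cleanup could mask the original error. A self-contained sketch of that
standard Java behavior, not taken from the patch:

public class SuppressedSketch {
  static class Flaky implements AutoCloseable {
    @Override public void close() {
      throw new IllegalStateException("close failed");
    }
  }

  public static void main(String[] args) {
    try {
      try (Flaky f = new Flaky()) {
        throw new RuntimeException("body failed");
      }
    } catch (RuntimeException e) {
      // The body's exception stays primary; the close() failure
      // is recorded alongside it rather than replacing it.
      System.out.println(e.getMessage());                    // body failed
      System.out.println(e.getSuppressed()[0].getMessage()); // close failed
    }
  }
}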