From: cnauroth@apache.org
To: hdfs-commits@hadoop.apache.org
Reply-To: hdfs-dev@hadoop.apache.org
Subject: svn commit: r1504686 - in /hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/org/apache/hadoop/hdfs/server/namenode/ src/main/java/org/apache/hadoop/hdfs/tools/ src/test/java/org/apache/hadoop/hdfs/server/namenode/
Date: Thu, 18 Jul 2013 22:13:04 -0000

Author: cnauroth
Date: Thu Jul 18 22:13:03 2013
New Revision: 1504686

URL: http://svn.apache.org/r1504686
Log:
HDFS-4996. Merging change r1504682 from branch-2 to branch-2.1-beta.

Modified:
    hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
    hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java

Modified: hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1504686&r1=1504685&r2=1504686&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Thu Jul 18 22:13:03 2013
@@ -202,6 +202,9 @@ Release 2.1.0-beta - 2013-07-02
     HDFS-4992. Make balancer's mover thread count and dispatcher thread count
     configurable. (Max Lapan via szetszwo)
 
+    HDFS-4996. ClientProtocol#metaSave can be made idempotent by overwriting the
+    output file instead of appending to it. (cnauroth)
+
   OPTIMIZATIONS
 
     HDFS-4465. Optimize datanode ReplicasMap and ReplicaInfo. (atm)
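The CHANGES.txt entry above summarizes the whole fix: metaSave becomes idempotent because its report file is now truncated on each call rather than appended to. A minimal, JDK-only sketch of the FileOutputStream behavior this relies on (the class name, temp path, and sample line are illustrative, not code from this commit; FSNamesystem#metaSave itself writes under hadoop.log.dir):

    import java.io.File;
    import java.io.FileOutputStream;
    import java.io.IOException;
    import java.io.OutputStreamWriter;
    import java.io.PrintWriter;
    import java.nio.charset.StandardCharsets;

    public class OverwriteVsAppendDemo {
      public static void main(String[] args) throws IOException {
        // Illustrative scratch file standing in for the metasave report.
        File file = new File(System.getProperty("java.io.tmpdir"), "metasave.demo.txt");
        for (int i = 0; i < 2; i++) {
          // new FileOutputStream(file, true) would append, so a second run doubles the report.
          // new FileOutputStream(file) truncates, so a second run simply replaces it.
          PrintWriter out = new PrintWriter(new OutputStreamWriter(
              new FileOutputStream(file), StandardCharsets.UTF_8));
          out.println("Live Datanodes: 0");
          out.close();
        }
        // With the truncating constructor the file holds exactly one report line.
        System.out.println("bytes after two runs: " + file.length());
      }
    }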
Modified: hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1504686&r1=1504685&r2=1504686&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Thu Jul 18 22:13:03 2013
@@ -1180,7 +1180,7 @@ public class FSNamesystem implements Nam
       checkOperation(OperationCategory.UNCHECKED);
       File file = new File(System.getProperty("hadoop.log.dir"), filename);
       PrintWriter out = new PrintWriter(new BufferedWriter(
-          new OutputStreamWriter(new FileOutputStream(file, true), Charsets.UTF_8)));
+          new OutputStreamWriter(new FileOutputStream(file), Charsets.UTF_8)));
       metaSave(out);
       out.flush();
       out.close();

Modified: hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java?rev=1504686&r1=1504685&r2=1504686&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java (original)
+++ hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java Thu Jul 18 22:13:03 2013
@@ -628,6 +628,7 @@ public class DFSAdmin extends FsShell {
 
     String metaSave = "-metasave <filename>: \tSave Namenode's primary data structures\n"
       + "\t\tto <filename> in the directory specified by hadoop.log.dir property.\n"
+      + "\t\t<filename> is overwritten if it exists.\n"
      + "\t\t<filename> will contain one line for each of the following\n"
       + "\t\t\t1. Datanodes heart beating with Namenode\n"
       + "\t\t\t2. Blocks waiting to be replicated\n"
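The -metasave help text above documents the dfsadmin command, which forwards the filename to the NameNode over ClientProtocol. A rough client-side sketch of the same flow, assuming a reachable HDFS filesystem and the public DistributedFileSystem#metaSave wrapper (class and file names here are illustrative, not taken from this commit):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class MetaSaveTwiceDemo {
      public static void main(String[] args) throws Exception {
        // Roughly what "hdfs dfsadmin -metasave metasave.out.txt" does, run twice.
        Configuration conf = new Configuration();  // expects fs.defaultFS to point at HDFS
        FileSystem fs = FileSystem.get(conf);
        if (fs instanceof DistributedFileSystem) {
          DistributedFileSystem dfs = (DistributedFileSystem) fs;
          dfs.metaSave("metasave.out.txt");
          // After this change the second call overwrites the first report under the
          // NameNode's hadoop.log.dir instead of appending to it.
          dfs.metaSave("metasave.out.txt");
        }
      }
    }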
Modified: hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java?rev=1504686&r1=1504685&r2=1504686&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java (original)
+++ hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java Thu Jul 18 22:13:03 2013
@@ -18,9 +18,11 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.BufferedReader;
 import java.io.DataInputStream;
+import java.io.File;
 import java.io.FileInputStream;
 import java.io.IOException;
 import java.io.InputStreamReader;
@@ -31,6 +33,7 @@ import org.apache.hadoop.fs.CommonConfig
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -47,6 +50,7 @@ public class TestMetaSave {
   static final int blockSize = 8192;
   private static MiniDFSCluster cluster = null;
   private static FileSystem fileSys = null;
+  private static FSNamesystem namesystem = null;
 
   private void createFile(FileSystem fileSys, Path name) throws IOException {
     FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
@@ -72,6 +76,7 @@ public class TestMetaSave {
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
     cluster.waitActive();
     fileSys = cluster.getFileSystem();
+    namesystem = cluster.getNamesystem();
   }
 
   /**
@@ -79,9 +84,6 @@ public class TestMetaSave {
    */
   @Test
   public void testMetaSave() throws IOException, InterruptedException {
-
-    final FSNamesystem namesystem = cluster.getNamesystem();
-
     for (int i = 0; i < 2; i++) {
       Path file = new Path("/filestatus" + i);
       createFile(fileSys, file);
@@ -95,9 +97,8 @@ public class TestMetaSave {
     namesystem.metaSave("metasave.out.txt");
 
     // Verification
-    String logFile = System.getProperty("hadoop.log.dir") + "/"
-        + "metasave.out.txt";
-    FileInputStream fstream = new FileInputStream(logFile);
+    FileInputStream fstream = new FileInputStream(getLogFile(
+        "metasave.out.txt"));
     DataInputStream in = new DataInputStream(fstream);
     BufferedReader reader = null;
     try {
@@ -124,9 +125,6 @@ public class TestMetaSave {
   @Test
   public void testMetasaveAfterDelete() throws IOException,
       InterruptedException {
-
-    final FSNamesystem namesystem = cluster.getNamesystem();
-
     for (int i = 0; i < 2; i++) {
       Path file = new Path("/filestatus" + i);
       createFile(fileSys, file);
@@ -142,11 +140,10 @@ public class TestMetaSave {
     namesystem.metaSave("metasaveAfterDelete.out.txt");
 
     // Verification
-    String logFile = System.getProperty("hadoop.log.dir") + "/"
-        + "metasaveAfterDelete.out.txt";
     BufferedReader reader = null;
     try {
-      FileInputStream fstream = new FileInputStream(logFile);
+      FileInputStream fstream = new FileInputStream(getLogFile(
+          "metasaveAfterDelete.out.txt"));
       DataInputStream in = new DataInputStream(fstream);
       reader = new BufferedReader(new InputStreamReader(in));
       reader.readLine();
@@ -166,6 +163,42 @@
     }
   }
 
+  /**
+   * Tests that metasave overwrites the output file (not append).
+   */
+  @Test
+  public void testMetaSaveOverwrite() throws Exception {
+    // metaSave twice.
+    namesystem.metaSave("metaSaveOverwrite.out.txt");
+    namesystem.metaSave("metaSaveOverwrite.out.txt");
+
+    // Read output file.
+    FileInputStream fis = null;
+    InputStreamReader isr = null;
+    BufferedReader rdr = null;
+    try {
+      fis = new FileInputStream(getLogFile("metaSaveOverwrite.out.txt"));
+      isr = new InputStreamReader(fis);
+      rdr = new BufferedReader(isr);
+
+      // Validate that file was overwritten (not appended) by checking for
+      // presence of only one "Live Datanodes" line.
+      boolean foundLiveDatanodesLine = false;
+      String line = rdr.readLine();
+      while (line != null) {
+        if (line.startsWith("Live Datanodes")) {
+          if (foundLiveDatanodesLine) {
+            fail("multiple Live Datanodes lines, output file not overwritten");
+          }
+          foundLiveDatanodesLine = true;
+        }
+        line = rdr.readLine();
+      }
+    } finally {
+      IOUtils.cleanup(null, rdr, isr, fis);
+    }
+  }
+
   @AfterClass
   public static void tearDown() throws IOException {
     if (fileSys != null)
@@ -173,4 +206,14 @@
     if (cluster != null)
       cluster.shutdown();
   }
+
+  /**
+   * Returns a File for the given name inside the log directory.
+   *
+   * @param name String file name
+   * @return File for given name inside log directory
+   */
+  private static File getLogFile(String name) {
+    return new File(System.getProperty("hadoop.log.dir"), name);
+  }
 }
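One note on the new test's cleanup: org.apache.hadoop.io.IOUtils.cleanup(Log, Closeable...) closes each argument in turn and discards (or logs at debug level, when a non-null Log is supplied) any exception thrown by close(). A small hedged sketch of the same pattern outside the test (class and file names are illustrative):

    import java.io.BufferedReader;
    import java.io.FileReader;
    import java.io.IOException;

    import org.apache.hadoop.io.IOUtils;

    public class QuietCloseDemo {
      public static void main(String[] args) throws IOException {
        BufferedReader reader = null;
        try {
          reader = new BufferedReader(new FileReader("metasave.out.txt"));
          System.out.println(reader.readLine());
        } finally {
          // Passing null as the Log means close() failures are simply discarded,
          // matching how the new testMetaSaveOverwrite releases its streams.
          IOUtils.cleanup(null, reader);
        }
      }
    }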