hadoop-hdfs-commits mailing list archives

From: a..@apache.org
Subject: svn commit: r1570389 - in /hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/org/apache/hadoop/hdfs/qjournal/server/ src/main/java/org/apache/hadoop/hdfs/server/datanode/ src/main/java/org/apache/hadoop/hdfs/server/data...
Date: Thu, 20 Feb 2014 23:21:11 GMT
Author: arp
Date: Thu Feb 20 23:21:11 2014
New Revision: 1570389

URL: http://svn.apache.org/r1570389
Log:
HDFS-5987. Fix findbugs warnings in Rolling Upgrade branch. (Contributed by szetszwo)

Modified:
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-5535.txt
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/MD5FileUtils.java
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java

Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-5535.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-5535.txt?rev=1570389&r1=1570388&r2=1570389&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-5535.txt (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-5535.txt Thu Feb 20 23:21:11 2014
@@ -65,3 +65,7 @@ HDFS-5535 subtasks:
 
     HDFS-5985. SimulatedFSDataset#disableAndPurgeTrashStorage should not throw
     UnsupportedOperationException. (jing9 via kihwal)
+
+    HDFS-5987. Fix findbugs warnings in Rolling Upgrade branch. (szetszwo via
+    Arpit Agarwal)
+

Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java?rev=1570389&r1=1570388&r2=1570389&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java Thu Feb 20 23:21:11 2014
@@ -1037,7 +1037,7 @@ public class Journal implements Closeabl
     storage.getJournalManager().doRollback();
   }
 
-  public void discardSegments(long startTxId) throws IOException {
+  synchronized void discardSegments(long startTxId) throws IOException {
     storage.getJournalManager().discardSegments(startTxId);
     // we delete all the segments after the startTxId. let's reset committedTxnId 
     committedTxnId.set(startTxId - 1);

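The one-line change above does double duty: it narrows discardSegments() from public to package-private and, more importantly, synchronizes it. committedTxnId is presumably updated elsewhere in Journal only while holding the instance lock, so an unsynchronized write here is the kind of access pattern findbugs reports as inconsistent synchronization. A minimal, self-contained sketch of that warning class, with illustrative names rather than Journal's own:

    // Hypothetical illustration: a field written under "synchronized" on
    // some paths but not on others triggers findbugs' inconsistent-
    // synchronization (IS2_INCONSISTENT_SYNC) warning.
    class TxnTracker {
      private long committedTxnId;              // shared mutable state

      synchronized void commit(long txId) {     // synchronized write
        committedTxnId = txId;
      }

      void discardTo(long startTxId) {          // unsynchronized write:
        committedTxnId = startTxId - 1;         // flagged by findbugs
      }

      // The fix mirrors the commit: synchronize the whole method.
      synchronized void discardToSafe(long startTxId) {
        committedTxnId = startTxId - 1;
      }
    }
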
Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java?rev=1570389&r1=1570388&r2=1570389&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java Thu Feb 20 23:21:11 2014
@@ -27,21 +27,21 @@ import java.util.Properties;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
-import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.HardLink;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.util.Daemon;
 
+import com.google.common.annotations.VisibleForTesting;
+
 /**
  * Manages storage for the set of BlockPoolSlices which share a particular 
  * block pool id, on this DataNode.
@@ -382,8 +382,9 @@ public class BlockPoolSliceStorage exten
    * locations under current/
    *
    * @param trashRoot
+   * @throws IOException 
    */
-  private int restoreBlockFilesFromTrash(File trashRoot) {
+  private int restoreBlockFilesFromTrash(File trashRoot) throws IOException {
     int filesRestored = 0;
     File restoreDirectory = null;
 
@@ -395,10 +396,15 @@ public class BlockPoolSliceStorage exten
 
       if (restoreDirectory == null) {
         restoreDirectory = new File(getRestoreDirectory(child));
-        restoreDirectory.mkdirs();
+        if (!restoreDirectory.mkdirs()) {
+          throw new IOException("Failed to create directory " + restoreDirectory);
+        }
       }
 
-      child.renameTo(new File(restoreDirectory, child.getName()));
+      final File newChild = new File(restoreDirectory, child.getName());
+      if (!child.renameTo(newChild)) {
+        throw new IOException("Failed to rename " + child + " to " + newChild);
+      }
       ++filesRestored;
     }
 

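The restoreBlockFilesFromTrash() hunk applies the standard remedy for findbugs' ignored-return-value warning: java.io.File#mkdirs() and File#renameTo() signal failure through a boolean rather than an exception, so an unchecked call silently swallows errors. A compact, self-contained sketch of the check-and-throw pattern, using a hypothetical helper name:

    import java.io.File;
    import java.io.IOException;

    class FileOps {
      // Hypothetical helper: convert File's boolean failure signals into
      // IOExceptions, as the commit does in restoreBlockFilesFromTrash().
      static void renameInto(File src, File dstDir) throws IOException {
        if (!dstDir.mkdirs()) {
          throw new IOException("Failed to create directory " + dstDir);
        }
        final File dst = new File(dstDir, src.getName());
        if (!src.renameTo(dst)) {
          throw new IOException("Failed to rename " + src + " to " + dst);
        }
      }
    }
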
Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java?rev=1570389&r1=1570388&r2=1570389&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java Thu Feb 20 23:21:11 2014
@@ -198,7 +198,10 @@ class FsDatasetAsyncDiskService {
     private boolean moveFiles() {
       File newBlockFile = new File(trashDirectory, blockFile.getName());
       File newMetaFile = new File(trashDirectory, metaFile.getName());
-      (new File(trashDirectory)).mkdirs();
+      if (!new File(trashDirectory).mkdirs()) {
+        LOG.error("Failed to create trash directory " + trashDirectory);
+        return false;
+      }
 
       if (LOG.isDebugEnabled()) {
         LOG.debug("Moving files " + blockFile.getName() + " and " +

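Here the same ignored-return-value fix is applied, but with a logged error and a false return instead of an exception, matching moveFiles()'s boolean contract. One subtlety worth noting: File#mkdirs() returns false both on failure and when the directory already exists, so the guarded call above also bails out if the trash directory is already present. A variant that tolerates a pre-existing directory (an editorial sketch, not what this commit does):

    // Sketch only: treat an already-existing trash directory as success.
    File trashDir = new File(trashDirectory);
    if (!trashDir.mkdirs() && !trashDir.isDirectory()) {
      LOG.error("Failed to create trash directory " + trashDirectory);
      return false;
    }
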
Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/MD5FileUtils.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/MD5FileUtils.java?rev=1570389&r1=1570388&r2=1570389&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/MD5FileUtils.java (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/MD5FileUtils.java Thu Feb 20 23:21:11 2014
@@ -65,16 +65,13 @@ public abstract class MD5FileUtils {
   }
   
   /**
-   * Read the md5 checksum stored alongside the given file, or null
-   * if no md5 is stored.
+   * Read the md5 file stored alongside the given data file
+   * and match the md5 file content.
    * @param dataFile the file containing data
-   * @return the checksum stored in dataFile.md5
+   * @return a matcher with two matched groups
+   *   where group(1) is the md5 string and group(2) is the data file path.
    */
-  public static MD5Hash readStoredMd5ForFile(File dataFile) throws IOException {
-    File md5File = getDigestFileForFile(dataFile);
-
-    String md5Line;
-    
+  private static Matcher readStoredMd5(File md5File) throws IOException {
     if (!md5File.exists()) {
       return null;
     }
@@ -82,6 +79,7 @@ public abstract class MD5FileUtils {
     BufferedReader reader =
         new BufferedReader(new InputStreamReader(new FileInputStream(
             md5File), Charsets.UTF_8));
+    String md5Line;
     try {
       md5Line = reader.readLine();
       if (md5Line == null) { md5Line = ""; }
@@ -94,9 +92,20 @@ public abstract class MD5FileUtils {
     
     Matcher matcher = LINE_REGEX.matcher(md5Line);
     if (!matcher.matches()) {
-      throw new IOException("Invalid MD5 file at " + md5File
-          + " (does not match expected pattern)");
+      throw new IOException("Invalid MD5 file " + md5File + ": the content \""
+          + md5Line + "\" does not match the expected pattern.");
     }
+    return matcher;
+  }
+
+  /**
+   * Read the md5 checksum stored alongside the given data file.
+   * @param dataFile the file containing data
+   * @return the checksum stored in dataFile.md5
+   */
+  public static MD5Hash readStoredMd5ForFile(File dataFile) throws IOException {
+    final File md5File = getDigestFileForFile(dataFile);
+    final Matcher matcher = readStoredMd5(md5File);
     String storedHash = matcher.group(1);
     File referencedFile = new File(matcher.group(2));
 
@@ -155,19 +164,8 @@ public abstract class MD5FileUtils {
 
   public static void renameMD5File(File oldDataFile, File newDataFile)
       throws IOException {
-    File fromFile = getDigestFileForFile(oldDataFile);
-    BufferedReader in = null;
-    final String digestString;
-    try {
-      in = new BufferedReader(new InputStreamReader(new FileInputStream(
-          fromFile), Charsets.UTF_8));
-      String line = in.readLine();
-      String[] split = line.split(" \\*");
-      digestString = split[0];
-    } finally {
-      IOUtils.cleanup(LOG, in);
-    }
-
+    final File fromFile = getDigestFileForFile(oldDataFile);
+    final String digestString = readStoredMd5(fromFile).group(1);
     saveMD5File(newDataFile, digestString);
 
     if (!fromFile.delete()) {

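The MD5FileUtils hunks are a refactoring: parsing of the stored .md5 line moves into a new private readStoredMd5(File) helper, which readStoredMd5ForFile() and renameMD5File() now share instead of renameMD5File() doing its own split-on-" \*" parsing. Per the new javadoc, the returned Matcher exposes group(1) as the md5 string and group(2) as the data file path. A self-contained sketch of that parse; the regex is an assumption modeled on the md5sum-style line MD5FileUtils stores and may differ from the class's actual LINE_REGEX:

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    class Md5LineDemo {
      // Assumed stored format: "<32 hex digits> *<data file path>"
      private static final Pattern LINE =
          Pattern.compile("([0-9a-f]{32}) \\*(.+)");

      public static void main(String[] args) {
        final Matcher m = LINE.matcher(
            "d41d8cd98f00b204e9800998ecf8427e *fsimage_0000000000000000000");
        if (m.matches()) {
          System.out.println("md5  = " + m.group(1));  // stored checksum
          System.out.println("file = " + m.group(2));  // referenced data file
        }
      }
    }
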
Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java?rev=1570389&r1=1570388&r2=1570389&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java Thu Feb 20 23:21:11 2014
@@ -253,33 +253,57 @@ public class TestRollingUpgrade {
 
       final Path foo = new Path("/foo");
       final Path bar = new Path("/bar");
+      cluster.getFileSystem().mkdirs(foo);
 
-      {
-        final DistributedFileSystem dfs = cluster.getFileSystem();
-        dfs.mkdirs(foo);
-  
-        //start rolling upgrade
-        dfs.rollingUpgrade(RollingUpgradeAction.START);
-  
-        dfs.mkdirs(bar);
-        
-        Assert.assertTrue(dfs.exists(foo));
-        Assert.assertTrue(dfs.exists(bar));
-      }
+      startRollingUpgrade(foo, bar, cluster);
+      cluster.getFileSystem().rollEdits();
+      cluster.getFileSystem().rollEdits();
+      rollbackRollingUpgrade(foo, bar, cluster);
+
+      startRollingUpgrade(foo, bar, cluster);
+      cluster.getFileSystem().rollEdits();
+      cluster.getFileSystem().rollEdits();
+      rollbackRollingUpgrade(foo, bar, cluster);
 
-      // Restart should succeed!
+      startRollingUpgrade(foo, bar, cluster);
       cluster.restartNameNode();
+      rollbackRollingUpgrade(foo, bar, cluster);
 
-      cluster.restartNameNode("-rollingUpgrade", "rollback");
-      {
-        final DistributedFileSystem dfs = cluster.getFileSystem();
-        Assert.assertTrue(dfs.exists(foo));
-        Assert.assertFalse(dfs.exists(bar));
-      }
+      startRollingUpgrade(foo, bar, cluster);
+      cluster.restartNameNode();
+      rollbackRollingUpgrade(foo, bar, cluster);
+
+      startRollingUpgrade(foo, bar, cluster);
+      rollbackRollingUpgrade(foo, bar, cluster);
+
+      startRollingUpgrade(foo, bar, cluster);
+      rollbackRollingUpgrade(foo, bar, cluster);
     } finally {
       if(cluster != null) cluster.shutdown();
     }
   }
+  
+  private static void startRollingUpgrade(Path foo, Path bar,
+      MiniDFSCluster cluster) throws IOException {
+    final DistributedFileSystem dfs = cluster.getFileSystem();
+
+    //start rolling upgrade
+    dfs.rollingUpgrade(RollingUpgradeAction.START);
+
+    dfs.mkdirs(bar);
+    
+    Assert.assertTrue(dfs.exists(foo));
+    Assert.assertTrue(dfs.exists(bar));
+  }
+  
+  private static void rollbackRollingUpgrade(Path foo, Path bar,
+      MiniDFSCluster cluster) throws IOException {
+    cluster.restartNameNode("-rollingUpgrade", "rollback");
+
+    final DistributedFileSystem dfs = cluster.getFileSystem();
+    Assert.assertTrue(dfs.exists(foo));
+    Assert.assertFalse(dfs.exists(bar));
+  }
 
   @Test
   public void testDFSAdminDatanodeUpgradeControlCommands() throws Exception {

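The test rewrite replaces one hard-coded start/restart/rollback pass with two reusable helpers and drives them through several scenarios: rollback after rolling the edit log (twice, to cross segment boundaries), rollback after a plain NameNode restart, and repeated back-to-back cycles. Condensed from the calls in the diff above (reusing the test's own names, so not independently runnable), each pass verifies this cycle:

    final DistributedFileSystem dfs = cluster.getFileSystem();
    dfs.rollingUpgrade(RollingUpgradeAction.START);          // begin upgrade
    dfs.mkdirs(bar);                                         // edit made during upgrade
    cluster.restartNameNode("-rollingUpgrade", "rollback");  // discard upgrade edits
    // After rollback: pre-upgrade /foo survives, in-upgrade /bar is gone.
    Assert.assertTrue(cluster.getFileSystem().exists(foo));
    Assert.assertFalse(cluster.getFileSystem().exists(bar));
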

