hadoop-common-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From szets...@apache.org
Subject svn commit: r704186 - in /hadoop/core/trunk: CHANGES.txt src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSImage.java src/test/org/apache/hadoop/hdfs/TestRestartDFS.java
Date Mon, 13 Oct 2008 17:55:54 GMT
Author: szetszwo
Date: Mon Oct 13 10:55:54 2008
New Revision: 704186

URL: http://svn.apache.org/viewvc?rev=704186&view=rev
Log:
HADOOP-4395. The FSEditLog loading is incorrect for the case OP_SET_OWNER. (szetszwo)

Modified:
    hadoop/core/trunk/CHANGES.txt
    hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
    hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSImage.java
    hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestRestartDFS.java

Modified: hadoop/core/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/CHANGES.txt?rev=704186&r1=704185&r2=704186&view=diff
==============================================================================
--- hadoop/core/trunk/CHANGES.txt (original)
+++ hadoop/core/trunk/CHANGES.txt Mon Oct 13 10:55:54 2008
@@ -902,6 +902,9 @@
     HADOOP-4228. dfs datanoe metrics, bytes_read and bytes_written, overflow
     due to incorrect type used. (hairong)
 
+    HADOOP-4395. The FSEditLog loading is incorrect for the case OP_SET_OWNER.
+    (szetszwo)
+
 Release 0.18.1 - 2008-09-17
 
   IMPROVEMENTS

Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java?rev=704186&r1=704185&r2=704186&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java Mon Oct 13 10:55:54 2008
@@ -732,7 +732,8 @@
             throw new IOException("Unexpected opcode " + opcode
                                   + " for version " + logVersion);
           fsDir.unprotectedSetOwner(FSImage.readString(in),
-              FSImage.readString(in), FSImage.readString(in));
+              FSImage.readString_EmptyAsNull(in),
+              FSImage.readString_EmptyAsNull(in));
           break;
         }
         case OP_SET_NS_QUOTA: {

Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSImage.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSImage.java?rev=704186&r1=704185&r2=704186&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSImage.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSImage.java Mon Oct 13 10:55:54 2008
@@ -1540,6 +1540,11 @@
     return U_STR.toString();
   }
 
+  static String readString_EmptyAsNull(DataInputStream in) throws IOException {
+    final String s = readString(in);
+    return s.isEmpty()? null: s;
+  }
+
   static byte[] readBytes(DataInputStream in) throws IOException {
     U_STR.readFields(in);
     int len = U_STR.getLength();

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestRestartDFS.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestRestartDFS.java?rev=704186&r1=704185&r2=704186&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestRestartDFS.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestRestartDFS.java Mon Oct 13 10:55:54 2008
@@ -18,12 +18,10 @@
 
 package org.apache.hadoop.hdfs;
 
-import java.io.IOException;
-import java.util.Random;
-import junit.framework.*;
+import junit.framework.TestCase;
+
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 
@@ -31,30 +29,31 @@
  * A JUnit test for checking if restarting DFS preserves integrity.
  */
 public class TestRestartDFS extends TestCase {
-  
-  private static Configuration conf = new Configuration();
-
-  public TestRestartDFS(String testName) {
-    super(testName);
-  }
-
-  protected void setUp() throws Exception {
-  }
-
-  protected void tearDown() throws Exception {
-  }
-  
   /** check if DFS remains in proper condition after a restart */
   public void testRestartDFS() throws Exception {
+    final Configuration conf = new Configuration();
     MiniDFSCluster cluster = null;
     DFSTestUtil files = new DFSTestUtil("TestRestartDFS", 20, 3, 8*1024);
-    Path root = new Path("/");
-    long modificationTime;
+
+    final String dir = "/srcdat";
+    final Path rootpath = new Path("/");
+    final Path dirpath = new Path(dir);
+
+    long rootmtime;
+    FileStatus rootstatus;
+    FileStatus dirstatus;
+
     try {
       cluster = new MiniDFSCluster(conf, 4, true, null);
       FileSystem fs = cluster.getFileSystem();
-      files.createFiles(fs, "/srcdat");
-      modificationTime = fs.getFileStatus(root).getModificationTime();
+      files.createFiles(fs, dir);
+
+      rootmtime = fs.getFileStatus(rootpath).getModificationTime();
+      rootstatus = fs.getFileStatus(rootpath);
+      dirstatus = fs.getFileStatus(dirpath);
+
+      fs.setOwner(rootpath, rootstatus.getOwner() + "_XXX", null);
+      fs.setOwner(dirpath, null, dirstatus.getGroup() + "_XXX");
     } finally {
       if (cluster != null) { cluster.shutdown(); }
     }
@@ -62,11 +61,19 @@
       // Here we restart the MiniDFScluster without formatting namenode
       cluster = new MiniDFSCluster(conf, 4, false, null);
       FileSystem fs = cluster.getFileSystem();
-      assertEquals(modificationTime,
-                   fs.getFileStatus(root).getModificationTime());
       assertTrue("Filesystem corrupted after restart.",
-                 files.checkFiles(fs, "/srcdat"));
-      files.cleanup(fs, "/srcdat");
+                 files.checkFiles(fs, dir));
+
+      final FileStatus newrootstatus = fs.getFileStatus(rootpath);
+      assertEquals(rootmtime, newrootstatus.getModificationTime());
+      assertEquals(rootstatus.getOwner() + "_XXX", newrootstatus.getOwner());
+      assertEquals(rootstatus.getGroup(), newrootstatus.getGroup());
+
+      final FileStatus newdirstatus = fs.getFileStatus(dirpath);
+      assertEquals(dirstatus.getOwner(), newdirstatus.getOwner());
+      assertEquals(dirstatus.getGroup() + "_XXX", newdirstatus.getGroup());
+
+      files.cleanup(fs, dir);
     } finally {
       if (cluster != null) { cluster.shutdown(); }
     }



Mime
View raw message