hadoop-hdfs-commits mailing list archives

From: szets...@apache.org
Subject: svn commit: r1432796 [3/4] - in /hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs: ./ dev-support/ src/contrib/bkjournal/ src/main/java/ src/main/java/org/apache/hadoop/hdfs/ src/main/java/org/apache/hadoop/hdfs/protocol/ src/main/java/...
Date: Mon, 14 Jan 2013 03:44:46 GMT
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java?rev=1432796&r1=1432795&r2=1432796&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java Mon Jan 14 03:44:35 2013
@@ -58,6 +58,7 @@ public class INodeFileUnderConstruction 
     Preconditions.checkArgument(!(file instanceof INodeFileUnderConstruction),
         "file is already an INodeFileUnderConstruction");
     final INodeFileUnderConstruction uc = new INodeFileUnderConstruction(
+        file.getId(),
         file.getLocalNameBytes(),
         file.getFileReplication(),
         file.getModificationTime(),
@@ -75,18 +76,20 @@ public class INodeFileUnderConstruction 
   private final String clientMachine;
   private final DatanodeDescriptor clientNode; // if client is a cluster node too.
   
-  INodeFileUnderConstruction(PermissionStatus permissions,
+  INodeFileUnderConstruction(long id,
+                             PermissionStatus permissions,
                              short replication,
                              long preferredBlockSize,
                              long modTime,
                              String clientName,
                              String clientMachine,
                              DatanodeDescriptor clientNode) {
-    this(null, replication, modTime, preferredBlockSize, BlockInfo.EMPTY_ARRAY,
+    this(id, null, replication, modTime, preferredBlockSize, BlockInfo.EMPTY_ARRAY,
         permissions.applyUMask(UMASK), clientName, clientMachine, clientNode);
   }
 
-  INodeFileUnderConstruction(byte[] name,
+  INodeFileUnderConstruction(long id,
+                             byte[] name,
                              short blockReplication,
                              long modificationTime,
                              long preferredBlockSize,
@@ -95,7 +98,7 @@ public class INodeFileUnderConstruction 
                              String clientName,
                              String clientMachine,
                              DatanodeDescriptor clientNode) {
-    super(name, perm, modificationTime, modificationTime,
+    super(id, name, perm, modificationTime, modificationTime,
         blocks, blockReplication, preferredBlockSize);
     this.clientName = clientName;
     this.clientMachine = clientMachine;
@@ -140,7 +143,7 @@ public class INodeFileUnderConstruction 
    */
   protected INodeFile toINodeFile(long mtime) {
     assertAllBlocksComplete();
-    return new INodeFile(getLocalNameBytes(), getPermissionStatus(),
+    return new INodeFile(getId(), getLocalNameBytes(), getPermissionStatus(),
         mtime, getModificationTime(),
         getBlocks(), getFileReplication(), getPreferredBlockSize());
   }
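
This hunk threads a new long id argument through each INodeFileUnderConstruction constructor and on into the INode superclass, so every inode carries its id from construction. The shape of the change, as a minimal sketch (class and field names are illustrative, not the real INode hierarchy):

    // Sketch: telescoping constructors forward the new id to the base class
    // ahead of the pre-existing arguments, exactly as the hunk above does.
    abstract class Node {
      private final long id;
      Node(long id) { this.id = id; }
      long getId() { return id; }
    }
    class FileNode extends Node {
      private final byte[] name;
      FileNode(long id, byte[] name) {
        super(id);        // id first, then the existing parameters
        this.name = name;
      }
    }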

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java?rev=1432796&r1=1432795&r2=1432796&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java Mon Jan 14 03:44:35 2013
@@ -28,9 +28,9 @@ import org.apache.hadoop.hdfs.DFSUtil;
 public class INodeSymlink extends INode {
   private final byte[] symlink; // The target URI
 
-  INodeSymlink(String value, long mtime, long atime,
-               PermissionStatus permissions) {
-    super(permissions, mtime, atime);
+  INodeSymlink(long id, String value, long mtime, long atime,
+      PermissionStatus permissions) {
+    super(id, permissions, mtime, atime);
     this.symlink = DFSUtil.string2Bytes(value);
   }
   

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeResourceChecker.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeResourceChecker.java?rev=1432796&r1=1432795&r2=1432796&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeResourceChecker.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeResourceChecker.java Mon Jan 14 03:44:35 2013
@@ -164,7 +164,7 @@ public class NameNodeResourceChecker {
     
     CheckedVolume newVolume = new CheckedVolume(dir, required);
     CheckedVolume volume = volumes.get(newVolume.getVolume());
-    if (volume == null || (volume != null && !volume.isRequired())) {
+    if (volume == null || !volume.isRequired()) {
       volumes.put(newVolume.getVolume(), newVolume);
     }
   }
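
The deleted volume != null test was redundant: || short-circuits, so its right operand only runs once volume == null is known to be false. Both forms are therefore equivalent (a sketch using the type from this hunk):

    static boolean shouldReplace(CheckedVolume volume) {
      // Old: volume == null || (volume != null && !volume.isRequired())
      // The inner null test can never change the result, because the second
      // operand of || is evaluated only when volume is non-null.
      return volume == null || !volume.isRequired();
    }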

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RenewDelegationTokenServlet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RenewDelegationTokenServlet.java?rev=1432796&r1=1432795&r2=1432796&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RenewDelegationTokenServlet.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RenewDelegationTokenServlet.java Mon Jan 14 03:44:35 2013
@@ -17,7 +17,8 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.IOException;
-import java.io.PrintStream;
+import java.io.OutputStreamWriter;
+import java.io.PrintWriter;
 import java.security.PrivilegedExceptionAction;
 
 import javax.servlet.ServletContext;
@@ -32,6 +33,8 @@ import org.apache.hadoop.hdfs.security.t
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 
+import com.google.common.base.Charsets;
+
 /**
  * Renew delegation tokens over http for use in hftp.
  */
@@ -73,7 +76,8 @@ public class RenewDelegationTokenServlet
           return nn.getRpcServer().renewDelegationToken(token);
         }
       });
-      PrintStream os = new PrintStream(resp.getOutputStream());
+      final PrintWriter os = new PrintWriter(new OutputStreamWriter(
+          resp.getOutputStream(), Charsets.UTF_8));
       os.println(result);
       os.close();
     } catch(Exception e) {
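
This is the first of several hunks in the commit with the same shape: classes that encode with the platform default charset (PrintStream, FileReader, FileWriter, bare InputStreamReader, String.getBytes()) are replaced by equivalents given an explicit UTF-8 charset, making the bytes written and read deterministic across platforms. The pattern in isolation, as a sketch:

    import java.io.OutputStream;
    import java.io.OutputStreamWriter;
    import java.io.PrintWriter;
    import com.google.common.base.Charsets;

    // new PrintStream(out) would encode with the JVM's default charset;
    // wrapping the stream in a writer pins the encoding to UTF-8.
    static PrintWriter utf8Writer(OutputStream out) {
      return new PrintWriter(new OutputStreamWriter(out, Charsets.UTF_8));
    }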

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java?rev=1432796&r1=1432795&r2=1432796&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java Mon Jan 14 03:44:35 2013
@@ -605,7 +605,9 @@ public class SecondaryNameNode implement
       terminate(ret);
     }
 
-    secondary.startCheckpointThread();
+    if (secondary != null) {
+      secondary.startCheckpointThread();
+    }
   }
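
The guard is needed because the surrounding flow can reach this point with secondary still null, for example when an earlier branch only ran a one-shot command. A condensed sketch of that shape (the predicate name is hypothetical):

    SecondaryNameNode secondary = null;
    if (shouldRunAsDaemon(argv)) {            // hypothetical condition
      secondary = new SecondaryNameNode(conf);
    }
    if (secondary != null) {                  // avoids the NPE fixed above
      secondary.startCheckpointThread();
    }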
   
   

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java?rev=1432796&r1=1432795&r2=1432796&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java Mon Jan 14 03:44:35 2013
@@ -20,7 +20,8 @@ package org.apache.hadoop.hdfs.server.na
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.OutputStream;
-import java.io.PrintStream;
+import java.io.OutputStreamWriter;
+import java.io.PrintWriter;
 import java.net.InetAddress;
 import java.net.URI;
 import java.net.URISyntaxException;
@@ -102,6 +103,7 @@ import org.apache.hadoop.security.UserGr
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
 
+import com.google.common.base.Charsets;
 import com.sun.jersey.spi.container.ResourceFilters;
 
 /** Web-hdfs NameNode implementation. */
@@ -713,7 +715,8 @@ public class NamenodeWebHdfsMethods {
     return new StreamingOutput() {
       @Override
       public void write(final OutputStream outstream) throws IOException {
-        final PrintStream out = new PrintStream(outstream);
+        final PrintWriter out = new PrintWriter(new OutputStreamWriter(
+            outstream, Charsets.UTF_8));
         out.println("{\"" + FileStatus.class.getSimpleName() + "es\":{\""
             + FileStatus.class.getSimpleName() + "\":[");
 
@@ -736,6 +739,7 @@ public class NamenodeWebHdfsMethods {
         
         out.println();
         out.println("]}}");
+        out.flush();
       }
     };
   }
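
The added out.flush() matters here: unlike PrintStream, PrintWriter does not autoflush, and this writer wraps a stream owned by the JAX-RS container rather than being closed locally, so unflushed characters would simply be dropped. In isolation:

    // PrintWriter buffers; without an explicit flush the buffered JSON may
    // never reach outstream if the writer itself is never closed.
    final PrintWriter out = new PrintWriter(
        new OutputStreamWriter(outstream, Charsets.UTF_8));
    out.println("{}");
    out.flush();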

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ReceivedDeletedBlockInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ReceivedDeletedBlockInfo.java?rev=1432796&r1=1432795&r2=1432796&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ReceivedDeletedBlockInfo.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ReceivedDeletedBlockInfo.java Mon Jan 14 03:44:35 2013
@@ -90,8 +90,8 @@ public class ReceivedDeletedBlockInfo {
     ReceivedDeletedBlockInfo other = (ReceivedDeletedBlockInfo) o;
     return this.block.equals(other.getBlock())
         && this.status == other.status
-        && (this.delHints == other.delHints ||
-            this.delHints != null && this.delHints.equals(other.delHints));
+        && this.delHints != null
+        && this.delHints.equals(other.delHints);
   }
 
   @Override
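
Note the semantics: the revised expression requires delHints to be non-null on both sides, whereas the old one also treated two null hints as equal. For reference, the usual null-tolerant comparison is the sketch below (java.util.Objects.equals and Guava's Objects.equal implement the same logic):

    // Null-safe equality: true when both are null, or both non-null and equal.
    static boolean delHintsEqual(String a, String b) {
      return (a == b) || (a != null && a.equals(b));
    }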

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RemoteEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RemoteEditLog.java?rev=1432796&r1=1432795&r2=1432796&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RemoteEditLog.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RemoteEditLog.java Mon Jan 14 03:44:35 2013
@@ -89,6 +89,9 @@ public class RemoteEditLog implements Co
     new Function<RemoteEditLog, Long>() {
       @Override
       public Long apply(RemoteEditLog log) {
+        if (null == log) {
+          return HdfsConstants.INVALID_TXID;
+        }
         return log.getStartTxId();
       }
     };
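
With the guard, the Guava Function is total over null inputs: a null log maps to the INVALID_TXID sentinel instead of throwing a NullPointerException inside a bulk transform. The idiom in reduced form (the local constant stands in for HdfsConstants.INVALID_TXID):

    import com.google.common.base.Function;

    static final long INVALID_TXID = -12345; // stand-in for HdfsConstants.INVALID_TXID
    static final Function<RemoteEditLog, Long> GET_START_TXID =
        new Function<RemoteEditLog, Long>() {
          @Override
          public Long apply(RemoteEditLog log) {
            return log == null ? INVALID_TXID : log.getStartTxId();
          }
        };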

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java?rev=1432796&r1=1432795&r2=1432796&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java Mon Jan 14 03:44:35 2013
@@ -55,6 +55,8 @@ import org.apache.hadoop.security.UserGr
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.GenericOptionsParser;
 
+import com.google.common.base.Charsets;
+
 /**
  * Fetch a DelegationToken from the current Namenode and store it in the
  * specified file.
@@ -269,8 +271,8 @@ public class DelegationTokenFetcher {
         throw new IOException("Error renewing token: " + 
             connection.getResponseMessage());
       }
-      in = new BufferedReader(new InputStreamReader
-          (connection.getInputStream()));
+      in = new BufferedReader(
+          new InputStreamReader(connection.getInputStream(), Charsets.UTF_8));
       long result = Long.parseLong(in.readLine());
       in.close();
       return result;

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/JMXGet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/JMXGet.java?rev=1432796&r1=1432795&r2=1432796&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/JMXGet.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/JMXGet.java Mon Jan 14 03:44:35 2013
@@ -63,7 +63,7 @@ import org.apache.hadoop.classification.
 @InterfaceAudience.Private
 public class JMXGet {
 
-  private static final String format = "%s=%s\n";
+  private static final String format = "%s=%s%n";
   private ArrayList<ObjectName> hadoopObjectNames;
   private MBeanServerConnection mbsc;
   private String service = "NameNode", port = "", server = "localhost";
@@ -126,7 +126,8 @@ public class JMXGet {
           continue;
         }
       }
-      err("Info: key = " + key + "; val = "+ val.getClass() +":"+ val);
+      err("Info: key = " + key + "; val = " +
+          (val == null ? "null" : val.getClass()) + ":" + val);
       break;
     }
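
The format-string change from \n to %n swaps a hard-coded linefeed for the specifier that expands to the platform line separator:

    // %n expands to the platform line separator (LF on Unix, CRLF on Windows);
    // a literal \n always emits a bare linefeed regardless of platform.
    System.out.printf("%s=%s%n", "key", "value");
    System.out.printf("%s=%s\n", "key", "value");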
 

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsXmlLoader.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsXmlLoader.java?rev=1432796&r1=1432795&r2=1432796&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsXmlLoader.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsXmlLoader.java Mon Jan 14 03:44:35 2013
@@ -18,9 +18,10 @@
 package org.apache.hadoop.hdfs.tools.offlineEditsViewer;
 
 import java.io.File;
+import java.io.FileInputStream;
 import java.io.FileNotFoundException;
-import java.io.FileReader;
 import java.io.IOException;
+import java.io.InputStreamReader;
 import java.util.Stack;
 
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -39,6 +40,8 @@ import org.xml.sax.XMLReader;
 import org.xml.sax.helpers.DefaultHandler;
 import org.xml.sax.helpers.XMLReaderFactory;
 
+import com.google.common.base.Charsets;
+
 /**
  * OfflineEditsXmlLoader walks an EditsVisitor over an OEV XML file
  */
@@ -48,7 +51,7 @@ class OfflineEditsXmlLoader 
     extends DefaultHandler implements OfflineEditsLoader {
   private final boolean fixTxIds;
   private final OfflineEditsVisitor visitor;
-  private final FileReader fileReader;
+  private final InputStreamReader fileReader;
   private ParseState state;
   private Stanza stanza;
   private Stack<Stanza> stanzaStack;
@@ -70,7 +73,8 @@ class OfflineEditsXmlLoader 
   public OfflineEditsXmlLoader(OfflineEditsVisitor visitor,
         File inputFile, OfflineEditsViewer.Flags flags) throws FileNotFoundException {
     this.visitor = visitor;
-    this.fileReader = new FileReader(inputFile);
+    this.fileReader =
+        new InputStreamReader(new FileInputStream(inputFile), Charsets.UTF_8);
     this.fixTxIds = flags.getFixTxIds();
   }
 

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/StatisticsEditsVisitor.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/StatisticsEditsVisitor.java?rev=1432796&r1=1432795&r2=1432796&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/StatisticsEditsVisitor.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/StatisticsEditsVisitor.java Mon Jan 14 03:44:35 2013
@@ -19,7 +19,8 @@ package org.apache.hadoop.hdfs.tools.off
 
 import java.io.IOException;
 import java.io.OutputStream;
-import java.io.PrintStream;
+import java.io.OutputStreamWriter;
+import java.io.PrintWriter;
 import java.util.Map;
 import java.util.HashMap;
 
@@ -29,6 +30,8 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes;
 
+import com.google.common.base.Charsets;
+
 /**
  * StatisticsEditsVisitor implements text version of EditsVisitor
  * that aggregates counts of op codes processed
@@ -37,7 +40,7 @@ import org.apache.hadoop.hdfs.server.nam
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
 public class StatisticsEditsVisitor implements OfflineEditsVisitor {
-  final private PrintStream out;
+  final private PrintWriter out;
 
   private int version = -1;
   private final Map<FSEditLogOpCodes, Long> opCodeCount =
@@ -52,7 +55,7 @@ public class StatisticsEditsVisitor impl
    * @param printToScreen Mirror output to screen?
    */
   public StatisticsEditsVisitor(OutputStream out) throws IOException {
-    this.out = new PrintStream(out);
+    this.out = new PrintWriter(new OutputStreamWriter(out, Charsets.UTF_8));
   }
 
   /** Start the visitor */

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TextWriterImageVisitor.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TextWriterImageVisitor.java?rev=1432796&r1=1432795&r2=1432796&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TextWriterImageVisitor.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TextWriterImageVisitor.java Mon Jan 14 03:44:35 2013
@@ -17,8 +17,12 @@
  */
 package org.apache.hadoop.hdfs.tools.offlineImageViewer;
 
+import java.io.FileOutputStream;
 import java.io.FileWriter;
 import java.io.IOException;
+import java.io.OutputStreamWriter;
+
+import com.google.common.base.Charsets;
 
 /**
  * TextWriterImageProcessor mixes in the ability for ImageVisitor
@@ -34,7 +38,7 @@ import java.io.IOException;
 abstract class TextWriterImageVisitor extends ImageVisitor {
   private boolean printToScreen = false;
   private boolean okToWrite = false;
-  final private FileWriter fw;
+  final private OutputStreamWriter fw;
 
   /**
    * Create a processor that writes to the file named.
@@ -56,7 +60,7 @@ abstract class TextWriterImageVisitor ex
          throws IOException {
     super();
     this.printToScreen = printToScreen;
-    fw = new FileWriter(filename);
+    fw = new OutputStreamWriter(new FileOutputStream(filename), Charsets.UTF_8);
     okToWrite = true;
   }
   

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/DataTransferThrottler.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/DataTransferThrottler.java?rev=1432796&r1=1432795&r2=1432796&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/DataTransferThrottler.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/DataTransferThrottler.java Mon Jan 14 03:44:35 2013
@@ -96,7 +96,12 @@ public class DataTransferThrottler {
         // Wait for next period so that curReserve can be increased.
         try {
           wait( curPeriodEnd - now );
-        } catch (InterruptedException ignored) {}
+        } catch (InterruptedException e) {
+          // Abort throttle and reset interrupted status to make sure other
+          // interrupt handling higher in the call stack executes.
+          Thread.currentThread().interrupt();
+          break;
+        }
       } else if ( now <  (curPeriodStart + periodExtension)) {
         curPeriodStart = curPeriodEnd;
         curReserve += bytesPerPeriod;
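
This removes a swallowed InterruptedException: the catch block now restores the thread's interrupted status and breaks out of the throttle loop, so interrupt handling further up the stack still runs. The general idiom, as a sketch:

    synchronized void waitQuietly(long millis) {
      try {
        wait(millis);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt(); // re-assert the flag for callers
        // then abort the current unit of work promptly (break/return)
      }
    }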

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/MD5FileUtils.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/MD5FileUtils.java?rev=1432796&r1=1432795&r2=1432796&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/MD5FileUtils.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/MD5FileUtils.java Mon Jan 14 03:44:35 2013
@@ -20,9 +20,9 @@ package org.apache.hadoop.hdfs.util;
 import java.io.BufferedReader;
 import java.io.File;
 import java.io.FileInputStream;
-import java.io.FileReader;
 import java.io.IOException;
 import java.io.InputStream;
+import java.io.InputStreamReader;
 import java.security.DigestInputStream;
 import java.security.MessageDigest;
 import java.util.regex.Matcher;
@@ -34,6 +34,8 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.util.StringUtils;
 
+import com.google.common.base.Charsets;
+
 /**
  * Static functions for dealing with files of the same format
  * that the Unix "md5sum" utility writes.
@@ -78,7 +80,8 @@ public abstract class MD5FileUtils {
     }
     
     BufferedReader reader =
-      new BufferedReader(new FileReader(md5File));
+        new BufferedReader(new InputStreamReader(new FileInputStream(
+            md5File), Charsets.UTF_8));
     try {
       md5Line = reader.readLine();
       if (md5Line == null) { md5Line = ""; }
@@ -138,7 +141,7 @@ public abstract class MD5FileUtils {
     String md5Line = digestString + " *" + dataFile.getName() + "\n";
     
     AtomicFileOutputStream afos = new AtomicFileOutputStream(md5File);
-    afos.write(md5Line.getBytes());
+    afos.write(md5Line.getBytes(Charsets.UTF_8));
     afos.close();
     LOG.debug("Saved MD5 " + digest + " to " + md5File);
   }

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/PersistentLongFile.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/PersistentLongFile.java?rev=1432796&r1=1432795&r2=1432796&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/PersistentLongFile.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/PersistentLongFile.java Mon Jan 14 03:44:35 2013
@@ -19,14 +19,18 @@ package org.apache.hadoop.hdfs.util;
 
 import java.io.BufferedReader;
 import java.io.File;
+import java.io.FileInputStream;
 import java.io.FileReader;
 import java.io.IOException;
+import java.io.InputStreamReader;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.io.IOUtils;
 
+import com.google.common.base.Charsets;
+
 /**
  * Class that represents a file on disk which persistently stores
  * a single <code>long</code> value. The file is updated atomically
@@ -74,7 +78,7 @@ public class PersistentLongFile {
   public static void writeFile(File file, long val) throws IOException {
     AtomicFileOutputStream fos = new AtomicFileOutputStream(file);
     try {
-      fos.write(String.valueOf(val).getBytes());
+      fos.write(String.valueOf(val).getBytes(Charsets.UTF_8));
       fos.write('\n');
       fos.close();
       fos = null;
@@ -88,7 +92,9 @@ public class PersistentLongFile {
   public static long readFile(File file, long defaultVal) throws IOException {
     long val = defaultVal;
     if (file.exists()) {
-      BufferedReader br = new BufferedReader(new FileReader(file));
+      BufferedReader br = 
+          new BufferedReader(new InputStreamReader(new FileInputStream(
+              file), Charsets.UTF_8));
       try {
         val = Long.valueOf(br.readLine());
         br.close();

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java?rev=1432796&r1=1432795&r2=1432796&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java Mon Jan 14 03:44:35 2013
@@ -105,6 +105,8 @@ import org.apache.hadoop.security.token.
 import org.apache.hadoop.util.Progressable;
 import org.mortbay.util.ajax.JSON;
 
+import com.google.common.base.Charsets;
+
 /** A FileSystem for HDFS over the web. */
 public class WebHdfsFileSystem extends FileSystem
     implements DelegationTokenRenewer.Renewable {
@@ -281,7 +283,7 @@ public class WebHdfsFileSystem extends F
             + "\" (parsed=\"" + parsed + "\")");
       }
     }
-    return (Map<?, ?>)JSON.parse(new InputStreamReader(in));
+    return (Map<?, ?>)JSON.parse(new InputStreamReader(in, Charsets.UTF_8));
   }
 
   private static Map<?, ?> validateResponse(final HttpOpParam.Op op,

Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:r1426019-1432788

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.c?rev=1432796&r1=1432795&r2=1432796&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.c (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.c Mon Jan 14 03:44:35 2013
@@ -52,7 +52,7 @@ struct NativeMiniDfsCluster* nmdCreate(s
 
     if (!env) {
         fprintf(stderr, "nmdCreate: unable to construct JNIEnv.\n");
-        goto error;
+        return NULL;
     }
     cl = calloc(1, sizeof(struct NativeMiniDfsCluster));
     if (!cl) {

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientDatanodeProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientDatanodeProtocol.proto?rev=1432796&r1=1432795&r2=1432796&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientDatanodeProtocol.proto (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientDatanodeProtocol.proto Mon Jan 14 03:44:35 2013
@@ -25,6 +25,7 @@ option java_generic_services = true;
 option java_generate_equals_and_hash = true;
 package hadoop.hdfs;
 
+import "Security.proto";
 import "hdfs.proto";
 
 /**
@@ -76,7 +77,7 @@ message DeleteBlockPoolResponseProto {
  */
 message GetBlockLocalPathInfoRequestProto {
   required ExtendedBlockProto block = 1;
-  required BlockTokenIdentifierProto token = 2;
+  required hadoop.common.TokenProto token = 2;
 }
 
 /**
@@ -96,7 +97,7 @@ message GetBlockLocalPathInfoResponsePro
  */
 message GetHdfsBlockLocationsRequestProto {
   repeated ExtendedBlockProto blocks = 1;
-  repeated BlockTokenIdentifierProto tokens = 2;
+  repeated hadoop.common.TokenProto tokens = 2;
 }
 
 /**

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto?rev=1432796&r1=1432795&r2=1432796&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto Mon Jan 14 03:44:35 2013
@@ -22,6 +22,7 @@ option java_generic_services = true;
 option java_generate_equals_and_hash = true;
 package hadoop.hdfs;
 
+import "Security.proto";
 import "hdfs.proto";
 
 /**
@@ -419,11 +420,11 @@ message GetDelegationTokenRequestProto {
 }
 
 message GetDelegationTokenResponseProto {
-  required BlockTokenIdentifierProto token = 1;
+  optional hadoop.common.TokenProto token = 1;
 }
 
 message RenewDelegationTokenRequestProto {
-  required BlockTokenIdentifierProto token = 1;
+  required hadoop.common.TokenProto token = 1;
 }
 
 message RenewDelegationTokenResponseProto {
@@ -431,7 +432,7 @@ message RenewDelegationTokenResponseProt
 }
 
 message CancelDelegationTokenRequestProto {
-  required BlockTokenIdentifierProto token = 1;
+  required hadoop.common.TokenProto token = 1;
 }
 
 message CancelDelegationTokenResponseProto { // void response
@@ -448,7 +449,7 @@ message GetDataEncryptionKeyRequestProto
 }
 
 message GetDataEncryptionKeyResponseProto {
-  required DataEncryptionKeyProto dataEncryptionKey = 1;
+  optional DataEncryptionKeyProto dataEncryptionKey = 1;
 }
 
 message CreateSnapshotRequestProto {

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto?rev=1432796&r1=1432795&r2=1432796&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto Mon Jan 14 03:44:35 2013
@@ -24,6 +24,7 @@ option java_outer_classname = "DataTrans
 option java_generate_equals_and_hash = true;
 package hadoop.hdfs;
 
+import "Security.proto";
 import "hdfs.proto";
 
 message DataTransferEncryptorMessageProto {
@@ -39,7 +40,7 @@ message DataTransferEncryptorMessageProt
 
 message BaseHeaderProto {
   required ExtendedBlockProto block = 1;
-  optional BlockTokenIdentifierProto token = 2;
+  optional hadoop.common.TokenProto token = 2;
 }
 
 message ClientOperationHeaderProto {

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto?rev=1432796&r1=1432795&r2=1432796&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto Mon Jan 14 03:44:35 2013
@@ -19,11 +19,14 @@
 // This file contains protocol buffers that are used throughout HDFS -- i.e.
 // by the client, server, and data transfer protocols.
 
+
 option java_package = "org.apache.hadoop.hdfs.protocol.proto";
 option java_outer_classname = "HdfsProtos";
 option java_generate_equals_and_hash = true;
 package hadoop.hdfs;
 
+import "Security.proto";
+
 /**
 * Extended block identifies a block
  */
@@ -36,16 +39,6 @@ message ExtendedBlockProto {
 }
 
 /**
- * Block Token
- */
-message BlockTokenIdentifierProto {
-  required bytes identifier = 1;
-  required bytes password = 2;
-  required string kind = 3;
-  required string service = 4;
-}
-
-/**
  * Identifies a Datanode
  */
 message DatanodeIDProto {
@@ -126,7 +119,7 @@ message LocatedBlockProto {
                                         // If block has few corrupt replicas, they are filtered and 
                                         // their locations are not part of this object
 
-  required BlockTokenIdentifierProto blockToken = 5;
+  required hadoop.common.TokenProto blockToken = 5;
 }
 
 message DataEncryptionKeyProto {
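
Taken together, the .proto hunks delete the HDFS-local BlockTokenIdentifierProto and repoint every token field at hadoop.common.TokenProto from the shared Security.proto, giving HDFS and common a single wire representation for tokens. On the Java side the conversion goes through PBHelper, as the TestPBHelper change later in this commit exercises; a round-trip sketch based on that test:

    // Convert a block token to the shared TokenProto and back.
    Token<BlockTokenIdentifier> token = new Token<BlockTokenIdentifier>(
        "identifier".getBytes(), "password".getBytes(),
        new Text("kind"), new Text("service"));
    TokenProto proto = PBHelper.convert(token);
    Token<BlockTokenIdentifier> token2 = PBHelper.convert(proto);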

Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:r1426019-1432788

Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1426019-1432788

Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1426019-1432788

Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1426019-1432788

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java?rev=1432796&r1=1432795&r2=1432796&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java Mon Jan 14 03:44:35 2013
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs;
 
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE;
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE;
+import static org.junit.Assert.*;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.fail;
@@ -31,6 +32,7 @@ import java.util.List;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
@@ -176,6 +178,44 @@ public class TestDFSRollback {
       cluster.shutdown();
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
       UpgradeUtilities.createEmptyDirs(dataNodeDirs);
+      
+      log("Normal BlockPool rollback", numDirs);
+      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
+      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
+                                                .format(false)
+                                                .manageDataDfsDirs(false)
+                                                .manageNameDfsDirs(false)
+                                                .startupOption(StartupOption.ROLLBACK)
+                                                .build();
+      UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
+      UpgradeUtilities.createBlockPoolStorageDirs(dataNodeDirs, "current",
+          UpgradeUtilities.getCurrentBlockPoolID(cluster));
+      // Create a previous snapshot for the blockpool
+      UpgradeUtilities.createBlockPoolStorageDirs(dataNodeDirs, "previous",
+          UpgradeUtilities.getCurrentBlockPoolID(cluster));
+      // Older LayoutVersion to make it rollback
+      storageInfo = new StorageInfo(
+          UpgradeUtilities.getCurrentLayoutVersion()+1,
+          UpgradeUtilities.getCurrentNamespaceID(cluster),
+          UpgradeUtilities.getCurrentClusterID(cluster),
+          UpgradeUtilities.getCurrentFsscTime(cluster));
+      // Create old VERSION file for each data dir
+      for (int i=0; i<dataNodeDirs.length; i++) {
+        Path bpPrevPath = new Path(dataNodeDirs[i] + "/current/"
+            + UpgradeUtilities.getCurrentBlockPoolID(cluster));
+        UpgradeUtilities.createBlockPoolVersionFile(
+            new File(bpPrevPath.toString()),
+            storageInfo,
+            UpgradeUtilities.getCurrentBlockPoolID(cluster));
+      }
+
+      cluster.startDataNodes(conf, 1, false, StartupOption.ROLLBACK, null);
+      assertTrue(cluster.isDataNodeUp());
+
+      cluster.shutdown();
+      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
+      UpgradeUtilities.createEmptyDirs(dataNodeDirs);
 
       log("NameNode rollback without existing previous dir", numDirs);
       UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java?rev=1432796&r1=1432795&r2=1432796&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java Mon Jan 14 03:44:35 2013
@@ -158,7 +158,7 @@ public class TestLargeBlock {
    * Test for block size of 2GB + 512B
    * @throws IOException in case of errors
    */
-  @Test
+  @Test(timeout = 120000)
   public void testLargeBlockSize() throws IOException {
     final long blockSize = 2L * 1024L * 1024L * 1024L + 512L; // 2GB + 512B
     runTest(blockSize);
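
The added timeout argument makes JUnit fail the test after the given number of milliseconds instead of letting a hung run stall the whole suite:

    // JUnit 4 sketch: without the timeout this would hang the build; with it,
    // the runner aborts and fails the test after one second.
    @Test(timeout = 1000)
    public void hangsForever() throws InterruptedException {
      Thread.sleep(Long.MAX_VALUE);
    }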

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java?rev=1432796&r1=1432795&r2=1432796&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java Mon Jan 14 03:44:35 2013
@@ -31,12 +31,12 @@ import org.apache.hadoop.hdfs.protocol.D
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto;
@@ -69,7 +69,9 @@ import org.apache.hadoop.hdfs.server.pro
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
 import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.util.DataChecksum;
 import org.junit.Test;
 
 import com.google.common.base.Joiner;
@@ -374,7 +376,7 @@ public class TestPBHelper {
     Token<BlockTokenIdentifier> token = new Token<BlockTokenIdentifier>(
         "identifier".getBytes(), "password".getBytes(), new Text("kind"),
         new Text("service"));
-    BlockTokenIdentifierProto tokenProto = PBHelper.convert(token);
+    TokenProto tokenProto = PBHelper.convert(token);
     Token<BlockTokenIdentifier> token2 = PBHelper.convert(tokenProto);
     compare(token, token2);
   }
@@ -403,30 +405,74 @@ public class TestPBHelper {
     assertEquals(expected.getKind(), actual.getKind());
     assertEquals(expected.getService(), actual.getService());
   }
-  
-  @Test
-  public void testConvertLocatedBlock() {
-    DatanodeInfo [] dnInfos = {
-        DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h1", AdminStates.DECOMMISSION_INPROGRESS),
-        DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h2", AdminStates.DECOMMISSIONED),
-        DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h3", AdminStates.NORMAL)
+
+  private void compare(LocatedBlock expected, LocatedBlock actual) {
+    assertEquals(expected.getBlock(), actual.getBlock());
+    compare(expected.getBlockToken(), actual.getBlockToken());
+    assertEquals(expected.getStartOffset(), actual.getStartOffset());
+    assertEquals(expected.isCorrupt(), actual.isCorrupt());
+    DatanodeInfo [] ei = expected.getLocations();
+    DatanodeInfo [] ai = actual.getLocations();
+    assertEquals(ei.length, ai.length);
+    for (int i = 0; i < ei.length ; i++) {
+      compare(ei[i], ai[i]);
+    }
+  }
+
+  private LocatedBlock createLocatedBlock() {
+    DatanodeInfo[] dnInfos = {
+        DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h1",
+            AdminStates.DECOMMISSION_INPROGRESS),
+        DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h2",
+            AdminStates.DECOMMISSIONED),
+        DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h3", 
+            AdminStates.NORMAL)
     };
     LocatedBlock lb = new LocatedBlock(
         new ExtendedBlock("bp12", 12345, 10, 53), dnInfos, 5, false);
+    lb.setBlockToken(new Token<BlockTokenIdentifier>(
+        "identifier".getBytes(), "password".getBytes(), new Text("kind"),
+        new Text("service")));
+    return lb;
+  }
+
+  @Test
+  public void testConvertLocatedBlock() {
+    LocatedBlock lb = createLocatedBlock();
     LocatedBlockProto lbProto = PBHelper.convert(lb);
     LocatedBlock lb2 = PBHelper.convert(lbProto);
-    assertEquals(lb.getBlock(), lb2.getBlock());
-    compare(lb.getBlockToken(), lb2.getBlockToken());
-    assertEquals(lb.getStartOffset(), lb2.getStartOffset());
-    assertEquals(lb.isCorrupt(), lb2.isCorrupt());
-    DatanodeInfo [] dnInfos2 = lb2.getLocations();
-    assertEquals(dnInfos.length, dnInfos2.length);
-    for (int i = 0; i < dnInfos.length ; i++) {
-      compare(dnInfos[i], dnInfos2[i]);
+    compare(lb,lb2);
+  }
+
+  @Test
+  public void testConvertLocatedBlockList() {
+    ArrayList<LocatedBlock> lbl = new ArrayList<LocatedBlock>();
+    for (int i=0;i<3;i++) {
+      lbl.add(createLocatedBlock());
+    }
+    List<LocatedBlockProto> lbpl = PBHelper.convertLocatedBlock2(lbl);
+    List<LocatedBlock> lbl2 = PBHelper.convertLocatedBlock(lbpl);
+    assertEquals(lbl.size(), lbl2.size());
+    for (int i=0;i<lbl.size();i++) {
+      compare(lbl.get(i), lbl2.get(i));
     }
   }
   
   @Test
+  public void testConvertLocatedBlockArray() {
+    LocatedBlock [] lbl = new LocatedBlock[3];
+    for (int i=0;i<3;i++) {
+      lbl[i] = createLocatedBlock();
+    }
+    LocatedBlockProto [] lbpl = PBHelper.convertLocatedBlock(lbl);
+    LocatedBlock [] lbl2 = PBHelper.convertLocatedBlock(lbpl);
+    assertEquals(lbl.length, lbl2.length);
+    for (int i=0;i<lbl.length;i++) {
+      compare(lbl[i], lbl2[i]);
+    }
+  }
+
+  @Test
   public void testConvertDatanodeRegistration() {
     DatanodeID dnId = DFSTestUtil.getLocalDatanodeID();
     BlockKey[] keys = new BlockKey[] { getBlockKey(2), getBlockKey(3) };
@@ -471,4 +517,20 @@ public class TestPBHelper {
       }
     }
   }
+  
+  @Test
+  public void testChecksumTypeProto() {
+    assertEquals(DataChecksum.Type.NULL,
+        PBHelper.convert(HdfsProtos.ChecksumTypeProto.CHECKSUM_NULL));
+    assertEquals(DataChecksum.Type.CRC32,
+        PBHelper.convert(HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32));
+    assertEquals(DataChecksum.Type.CRC32C,
+        PBHelper.convert(HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32C));
+    assertEquals(PBHelper.convert(DataChecksum.Type.NULL),
+        HdfsProtos.ChecksumTypeProto.CHECKSUM_NULL);
+    assertEquals(PBHelper.convert(DataChecksum.Type.CRC32),
+        HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32);
+    assertEquals(PBHelper.convert(DataChecksum.Type.CRC32C),
+        HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32C);
+  }
 }

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java?rev=1432796&r1=1432795&r2=1432796&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java Mon Jan 14 03:44:35 2013
@@ -41,6 +41,7 @@ import org.apache.hadoop.hdfs.protocol.D
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.net.NetworkTopology;
 import org.junit.Test;
+import junit.framework.Assert;
 
 /**
  * This class tests if a balancer schedules tasks correctly.
@@ -174,12 +175,25 @@ public class TestBalancerWithNodeGroup {
     LOG.info("Rebalancing with default factor.");
     waitForBalancer(totalUsedSpace, totalCapacity);
   }
+  
+  private void runBalancerCanFinish(Configuration conf,
+      long totalUsedSpace, long totalCapacity) throws Exception {
+    waitForHeartBeat(totalUsedSpace, totalCapacity);
+
+    // start rebalancing
+    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
+    final int r = Balancer.run(namenodes, Balancer.Parameters.DEFALUT, conf);
+    Assert.assertTrue(r == Balancer.ReturnStatus.SUCCESS.code ||
+        (r == Balancer.ReturnStatus.NO_MOVE_PROGRESS.code));
+    waitForHeartBeat(totalUsedSpace, totalCapacity);
+    LOG.info("Rebalancing with default factor.");
+  }
 
   /**
    * Create a cluster with even distribution, and a new empty node is added to
    * the cluster, then test rack locality for balancer policy. 
    */
-  @Test
+  @Test(timeout=60000)
   public void testBalancerWithRackLocality() throws Exception {
     Configuration conf = createConf();
     long[] capacities = new long[]{CAPACITY, CAPACITY};
@@ -217,7 +231,7 @@ public class TestBalancerWithNodeGroup {
       totalCapacity += newCapacity;
 
       // run balancer and validate results
-      runBalancer(conf, totalUsedSpace, totalCapacity);
+      runBalancerCanFinish(conf, totalUsedSpace, totalCapacity);
       
       DatanodeInfo[] datanodeReport = 
               client.getDatanodeReport(DatanodeReportType.ALL);
@@ -245,7 +259,7 @@ public class TestBalancerWithNodeGroup {
    * Create a cluster with even distribution, and a new empty node is added to
    * the cluster, then test node-group locality for balancer policy.
    */
-  @Test
+  @Test(timeout=60000)
   public void testBalancerWithNodeGroup() throws Exception {
     Configuration conf = createConf();
     long[] capacities = new long[]{CAPACITY, CAPACITY, CAPACITY, CAPACITY};
@@ -289,4 +303,49 @@ public class TestBalancerWithNodeGroup {
       cluster.shutdown();
     }
   }
+  
+  /**
+   * Create a 4-node cluster: 2 nodes (n0, n1) in RACK0/NODEGROUP0, 1 node (n2)
+   * in RACK1/NODEGROUP1 and 1 node (n3) in RACK1/NODEGROUP2. Fill the cluster
+   * to 60% with replication factor 3, so n2 and n3 hold a replica of every
+   * block under the NodeGroup-aware replica placement policy. As a result, n2
+   * and n3 are 80% full (60% x 4 / 3), and the NodeGroup-aware balancer policy
+   * cannot migrate any block from n2 or n3 to n0 or n1. Thus, we expect the
+   * balancer to end in 5 iterations without moving any blocks.
+   */
+  @Test(timeout=60000)
+  public void testBalancerEndInNoMoveProgress() throws Exception {
+    Configuration conf = createConf();
+    long[] capacities = new long[]{CAPACITY, CAPACITY, CAPACITY, CAPACITY};
+    String[] racks = new String[]{RACK0, RACK0, RACK1, RACK1};
+    String[] nodeGroups = new String[]{NODEGROUP0, NODEGROUP0, NODEGROUP1, NODEGROUP2};
+    
+    int numOfDatanodes = capacities.length;
+    assertEquals(numOfDatanodes, racks.length);
+    assertEquals(numOfDatanodes, nodeGroups.length);
+    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf)
+                                .numDataNodes(capacities.length)
+                                .racks(racks)
+                                .simulatedCapacities(capacities);
+    MiniDFSClusterWithNodeGroup.setNodeGroups(nodeGroups);
+    cluster = new MiniDFSClusterWithNodeGroup(builder);
+    try {
+      cluster.waitActive();
+      client = NameNodeProxies.createProxy(conf, 
+          cluster.getFileSystem(0).getUri(),
+          ClientProtocol.class).getProxy();
+
+      long totalCapacity = TestBalancer.sum(capacities);
+      // fill up the cluster to be 60% full
+      long totalUsedSpace = totalCapacity * 6 / 10;
+      TestBalancer.createFile(cluster, filePath, totalUsedSpace / 3, 
+          (short) (3), 0);
+
+      // run balancer which can finish in 5 iterations with no block movement.
+      runBalancerCanFinish(conf, totalUsedSpace, totalCapacity);
+
+    } finally {
+      cluster.shutdown();
+    }
+  }
 }

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java?rev=1432796&r1=1432795&r2=1432796&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java Mon Jan 14 03:44:35 2013
@@ -56,7 +56,7 @@ public class TestBlockInfo {
 
     LOG.info("Building block list...");
     for (int i = 0; i < MAX_BLOCKS; i++) {
-      blockList.add(new Block(i, 0, GenerationStamp.FIRST_VALID_STAMP));
+      blockList.add(new Block(i, 0, GenerationStamp.LAST_RESERVED_STAMP));
       blockInfoList.add(new BlockInfo(blockList.get(i), 3));
       dd.addBlock(blockInfoList.get(i));
 

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java?rev=1432796&r1=1432795&r2=1432796&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java Mon Jan 14 03:44:35 2013
@@ -19,10 +19,13 @@ package org.apache.hadoop.hdfs.server.bl
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.LinkedList;
 import java.util.List;
 import java.util.Map.Entry;
 
@@ -429,4 +432,57 @@ public class TestBlockManager {
     }
     return repls;
   }
+
+  /**
+   * Test that a source node for a highest-priority replication is chosen
+   * even if all available source nodes have reached their replication limits.
+   */
+  @Test
+  public void testHighestPriReplSrcChosenDespiteMaxReplLimit() throws Exception {
+    bm.maxReplicationStreams = 0;
+    bm.replicationStreamsHardLimit = 1;
+
+    long blockId = 42;         // arbitrary
+    Block aBlock = new Block(blockId, 0, 0);
+
+    List<DatanodeDescriptor> origNodes = getNodes(0, 1);
+    // Add the block to the first node.
+    addBlockOnNodes(blockId, origNodes.subList(0, 1));
+
+    List<DatanodeDescriptor> cntNodes = new LinkedList<DatanodeDescriptor>();
+    List<DatanodeDescriptor> liveNodes = new LinkedList<DatanodeDescriptor>();
+
+    assertNotNull("Chooses source node for a highest-priority replication"
+        + " even if all available source nodes have reached their replication"
+        + " limits below the hard limit.",
+        bm.chooseSourceDatanode(
+            aBlock,
+            cntNodes,
+            liveNodes,
+            new NumberReplicas(),
+            UnderReplicatedBlocks.QUEUE_HIGHEST_PRIORITY));
+
+    assertNull("Does not choose a source node for a less-than-highest-priority"
+        + " replication since all available source nodes have reached"
+        + " their replication limits.",
+        bm.chooseSourceDatanode(
+            aBlock,
+            cntNodes,
+            liveNodes,
+            new NumberReplicas(),
+            UnderReplicatedBlocks.QUEUE_VERY_UNDER_REPLICATED));
+
+    // Push the source's replication stream count past the hard limit
+    DatanodeDescriptor[] targets = { origNodes.get(1) };
+    origNodes.get(0).addBlockToBeReplicated(aBlock, targets);
+
+    assertNull("Does not choose a source node for a highest-priority"
+        + " replication when all available nodes exceed the hard limit.",
+        bm.chooseSourceDatanode(
+            aBlock,
+            cntNodes,
+            liveNodes,
+            new NumberReplicas(),
+            UnderReplicatedBlocks.QUEUE_HIGHEST_PRIORITY));
+  }
 }

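The three assertions encode a two-tier throttle: maxReplicationStreams is the soft cap on a datanode's outgoing replication work, while replicationStreamsHardLimit is the ceiling that even highest-priority re-replication may not cross. A condensed, hypothetical restatement of the rule the test exercises (the authoritative check lives in BlockManager#chooseSourceDatanode):

    // Hypothetical restatement; field names mirror those set in the test.
    boolean mayServeAsSource(int activeStreams, int priority) {
      if (priority == UnderReplicatedBlocks.QUEUE_HIGHEST_PRIORITY) {
        // Highest-priority work may use the headroom up to the hard limit.
        return activeStreams < replicationStreamsHardLimit;
      }
      // Every other priority is bounded by the soft limit.
      return activeStreams < maxReplicationStreams;
    }

With maxReplicationStreams = 0 and replicationStreamsHardLimit = 1, a node with no active streams is usable only for QUEUE_HIGHEST_PRIORITY; after addBlockToBeReplicated raises its count to 1, it is usable for nothing, which is exactly the sequence asserted above.
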
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java?rev=1432796&r1=1432795&r2=1432796&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java Mon Jan 14 03:44:35 2013
@@ -54,7 +54,7 @@ public class TestComputeInvalidateWork {
         for (int i=0; i<nodes.length; i++) {
           for(int j=0; j<3*blockInvalidateLimit+1; j++) {
             Block block = new Block(i*(blockInvalidateLimit+1)+j, 0, 
-                GenerationStamp.FIRST_VALID_STAMP);
+                GenerationStamp.LAST_RESERVED_STAMP);
             bm.addToInvalidates(block, nodes[i]);
           }
         }

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeDescriptor.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeDescriptor.java?rev=1432796&r1=1432795&r2=1432796&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeDescriptor.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeDescriptor.java Mon Jan 14 03:44:35 2013
@@ -44,7 +44,7 @@ public class TestDatanodeDescriptor {
     DatanodeDescriptor dd = DFSTestUtil.getLocalDatanodeDescriptor();
     ArrayList<Block> blockList = new ArrayList<Block>(MAX_BLOCKS);
     for (int i=0; i<MAX_BLOCKS; i++) {
-      blockList.add(new Block(i, 0, GenerationStamp.FIRST_VALID_STAMP));
+      blockList.add(new Block(i, 0, GenerationStamp.LAST_RESERVED_STAMP));
     }
     dd.addBlocksToBeInvalidated(blockList);
     Block[] bc = dd.getInvalidateBlocks(MAX_LIMIT);

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java?rev=1432796&r1=1432795&r2=1432796&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java Mon Jan 14 03:44:35 2013
@@ -75,7 +75,8 @@ public class TestHeartbeatHandling {
         synchronized(hm) {
           for (int i=0; i<MAX_REPLICATE_BLOCKS; i++) {
             dd.addBlockToBeReplicated(
-                new Block(i, 0, GenerationStamp.FIRST_VALID_STAMP), ONE_TARGET);
+                new Block(i, 0, GenerationStamp.LAST_RESERVED_STAMP),
+                ONE_TARGET);
           }
           DatanodeCommand[] cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd,
               namesystem).getCommands();
@@ -85,7 +86,7 @@ public class TestHeartbeatHandling {
           
           ArrayList<Block> blockList = new ArrayList<Block>(MAX_INVALIDATE_BLOCKS);
           for (int i=0; i<MAX_INVALIDATE_BLOCKS; i++) {
-            blockList.add(new Block(i, 0, GenerationStamp.FIRST_VALID_STAMP));
+            blockList.add(new Block(i, 0, GenerationStamp.LAST_RESERVED_STAMP));
           }
           dd.addBlocksToBeInvalidated(blockList);
           cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd, namesystem)

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java?rev=1432796&r1=1432795&r2=1432796&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java Mon Jan 14 03:44:35 2013
@@ -382,6 +382,24 @@ public class TestReplicationPolicy {
     assertTrue(cluster.isOnSameRack(targets[1], targets[2]));
     assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
   }
+
+  /**
+   * This testcase tries to choose more targets than there are available nodes
+   * and checks the result, with stale node avoidance on the write path enabled.
+   * @throws Exception
+   */
+  @Test
+  public void testChooseTargetWithMoreThanAvailableNodesWithStaleness()
+      throws Exception {
+    try {
+      namenode.getNamesystem().getBlockManager().getDatanodeManager()
+        .setAvoidStaleDataNodesForWrite(true);
+      testChooseTargetWithMoreThanAvailableNodes();
+    } finally {
+      namenode.getNamesystem().getBlockManager().getDatanodeManager()
+        .setAvoidStaleDataNodesForWrite(false);
+    }
+  }
   
   /**
    * In this testcase, it tries to choose more targets than available nodes and
@@ -389,7 +407,7 @@ public class TestReplicationPolicy {
    * @throws Exception
    */
   @Test
-  public void testChooseTargetWithMoreThanAvaiableNodes() throws Exception {
+  public void testChooseTargetWithMoreThanAvailableNodes() throws Exception {
     // make data node 0 & 1 to be not qualified to choose: not enough disk space
     for(int i=0; i<2; i++) {
       dataNodes[i].updateHeartbeat(

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java?rev=1432796&r1=1432795&r2=1432796&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java Mon Jan 14 03:44:35 2013
@@ -54,7 +54,7 @@ public class CreateEditsLog {
   static final String EDITS_DIR = "/tmp/EditsLogOut";
   static String edits_dir = EDITS_DIR;
   static final public long BLOCK_GENERATION_STAMP =
-    GenerationStamp.FIRST_VALID_STAMP;
+      GenerationStamp.LAST_RESERVED_STAMP;
   
   static void addFiles(FSEditLog editLog, int numFiles, short replication, 
                          int blocksPerFile, long startingBlockId,
@@ -62,7 +62,8 @@ public class CreateEditsLog {
     
     PermissionStatus p = new PermissionStatus("joeDoe", "people",
                                       new FsPermission((short)0777));
-    INodeDirectory dirInode = new INodeDirectory(p, 0L);
+    INodeDirectory dirInode = new INodeDirectory(INodeId.GRANDFATHER_INODE_ID,
+        p, 0L);
     editLog.logMkDir(BASE_PATH, dirInode);
     long blockSize = 10;
     BlockInfo[] blocks = new BlockInfo[blocksPerFile];
@@ -81,8 +82,9 @@ public class CreateEditsLog {
       }
 
       INodeFileUnderConstruction inode = new INodeFileUnderConstruction(
-                    null, replication, 0, blockSize, blocks, p, "", "", null);
-      // Append path to filename with information about blockIDs 
+          INodeId.GRANDFATHER_INODE_ID, null, replication, 0, blockSize,
+          blocks, p, "", "", null);
+      // Append path to filename with information about blockIDs
       String path = "_" + iF + "_B" + blocks[0].getBlockId() + 
                     "_to_B" + blocks[blocksPerFile-1].getBlockId() + "_";
       String filePath = nameGenerator.getNextFileName("");
@@ -90,12 +92,12 @@ public class CreateEditsLog {
       // Log the new sub directory in edits
       if ((iF % nameGenerator.getFilesPerDirectory())  == 0) {
         String currentDir = nameGenerator.getCurrentDir();
-        dirInode = new INodeDirectory(p, 0L);
+        dirInode = new INodeDirectory(INodeId.GRANDFATHER_INODE_ID, p, 0L);
         editLog.logMkDir(currentDir, dirInode);
       }
-      editLog.logOpenFile(filePath, 
-          new INodeFileUnderConstruction(
-              p, replication, 0, blockSize, "", "", null));
+      editLog.logOpenFile(filePath, new INodeFileUnderConstruction(
+          INodeId.GRANDFATHER_INODE_ID, p, replication, 0, blockSize, "", "",
+          null));
       editLog.logCloseFile(filePath, inode);
 
       if (currentBlockId - bidAtSync >= 2000) { // sync every 2K blocks

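All the churn in this file comes from one signature change: INode constructors now take the inode id as their first argument. CreateEditsLog fabricates inodes outside a live namesystem, so it passes the reserved INodeId.GRANDFATHER_INODE_ID rather than allocating real ids; a sketch of the pattern, with p taken from the surrounding code:

    // Ids are now explicit; GRANDFATHER_INODE_ID is the reserved value used
    // when no namesystem is available to allocate a real one.
    INodeDirectory dir =
        new INodeDirectory(INodeId.GRANDFATHER_INODE_ID, p, 0L);
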
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java?rev=1432796&r1=1432795&r2=1432796&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java Mon Jan 14 03:44:35 2013
@@ -208,7 +208,7 @@ public abstract class FSImageTestUtil {
    * only a specified number of "mkdirs" operations.
    */
   public static void createAbortedLogWithMkdirs(File editsLogDir, int numDirs,
-      long firstTxId) throws IOException {
+      long firstTxId, long newInodeId) throws IOException {
     FSEditLog editLog = FSImageTestUtil.createStandaloneEditLog(editsLogDir);
     editLog.setNextTxId(firstTxId);
     editLog.openForWrite();
@@ -217,7 +217,7 @@ public abstract class FSImageTestUtil {
         FsPermission.createImmutable((short)0755));
     for (int i = 1; i <= numDirs; i++) {
       String dirName = "dir" + i;
-      INodeDirectory dir = new INodeDirectory(dirName, perms);
+      INodeDirectory dir = new INodeDirectory(newInodeId + i - 1, dirName, perms);
       editLog.logMkDir("/" + dirName, dir);
     }
     editLog.logSync();

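createAbortedLogWithMkdirs now also takes the inode id to assign to the first generated directory; each subsequent mkdir gets the next id (newInodeId + i - 1 for the i-th directory). A hypothetical call site under that reading:

    // Generates dir1..dir10 with inode ids 1002..1011 in an aborted segment.
    FSImageTestUtil.createAbortedLogWithMkdirs(
        editsLogDir, 10 /* numDirs */, 1 /* firstTxId */, 1002 /* newInodeId */);
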
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java?rev=1432796&r1=1432795&r2=1432796&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java Mon Jan 14 03:44:35 2013
@@ -340,8 +340,8 @@ public class TestBackupNode {
       //
       // Take a checkpoint
       //
-      backup = startBackupNode(conf, op, 1);
       long txid = cluster.getNameNodeRpc().getTransactionID();
+      backup = startBackupNode(conf, op, 1);
       waitCheckpointDone(cluster, txid);
 
       for (int i = 0; i < 10; i++) {
@@ -417,11 +417,64 @@
       // verify that file2 exists
       assertTrue(fileSys.exists(file2));
     } catch(IOException e) {
-      LOG.error("Error in TestBackupNode:", e);
+      LOG.error("Error in TestBackupNode: ", e);
       assertTrue(e.getLocalizedMessage(), false);
     } finally {
       fileSys.close();
       cluster.shutdown();
     }
   }
+
+  /**
+   * Verify that a file can be read both from NameNode and BackupNode.
+   */
+  @Test
+  public void testCanReadData() throws IOException {
+    Path file1 = new Path("/fileToRead.dat");
+    Configuration conf = new HdfsConfiguration();
+    conf.setBoolean(DFSConfigKeys.DFS_PERSIST_BLOCKS_KEY, true);
+    MiniDFSCluster cluster = null;
+    FileSystem fileSys = null;
+    BackupNode backup = null;
+    try {
+      // Start NameNode and BackupNode
+      cluster = new MiniDFSCluster.Builder(conf)
+                                  .numDataNodes(0).format(true).build();
+      fileSys = cluster.getFileSystem();
+      long txid = cluster.getNameNodeRpc().getTransactionID();
+      backup = startBackupNode(conf, StartupOption.BACKUP, 1);
+      waitCheckpointDone(cluster, txid);
+
+      // Setup dual NameNode configuration for DataNodes
+      String rpcAddrKeyPrefix =
+          DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + ".bnCluster";
+      String nnAddr = cluster.getNameNode().getNameNodeAddressHostPortString();
+      String bnAddr = backup.getNameNodeAddressHostPortString();
+      conf.set(DFSConfigKeys.DFS_NAMESERVICES, "bnCluster");
+      conf.set(DFSConfigKeys.DFS_NAMESERVICE_ID, "bnCluster");
+      conf.set(DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX + ".bnCluster",
+          "nnActive, nnBackup");
+      conf.set(rpcAddrKeyPreffix + ".nnActive", nnAddr);
+      conf.set(rpcAddrKeyPreffix + ".nnBackup", bnAddr);
+      cluster.startDataNodes(conf, 3, true, StartupOption.REGULAR, null);
+
+      DFSTestUtil.createFile(
+          fileSys, file1, fileSize, fileSize, blockSize, (short)3, seed);
+
+      // Read the same file from file systems pointing to NN and BN
+      FileSystem bnFS = FileSystem.get(
+          new Path("hdfs://" + bnAddr).toUri(), conf);
+      String nnData = DFSTestUtil.readFile(fileSys, file1);
+      String bnData = DFSTestUtil.readFile(bnFS, file1);
+      assertEquals("Data read from BackupNode and NameNode is not the same.",
+          nnData, bnData);
+    } catch(IOException e) {
+      LOG.error("Error in TestBackupNode: ", e);
+      assertTrue(e.getLocalizedMessage(), false);
+    } finally {
+      if(fileSys != null) fileSys.close();
+      if(backup != null) backup.stop();
+      if(cluster != null) cluster.shutdown();
+    }
+  }
 }

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java?rev=1432796&r1=1432795&r2=1432796&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java Mon Jan 14 03:44:35 2013
@@ -152,7 +152,8 @@ public class TestEditLog {
 
       for (int i = 0; i < numTransactions; i++) {
         INodeFileUnderConstruction inode = new INodeFileUnderConstruction(
-                            p, replication, blockSize, 0, "", "", null);
+            namesystem.allocateNewInodeId(), p, replication, blockSize, 0, "",
+            "", null);
         editLog.logOpenFile("/filename" + (startIndex + i), inode);
         editLog.logCloseFile("/filename" + (startIndex + i), inode);
         editLog.logSync();
@@ -317,6 +318,11 @@ public class TestEditLog {
       // we should now be writing to edits_inprogress_3
       fsimage.rollEditLog();
     
+      // Remember the current lastInodeId and reset it below before loading
+      // the editlog segments. The following transactions allocate new inode
+      // ids and log them, but do not create inodes in the namespace.
+      long originalLastInodeId = namesystem.getLastInodeId();
+
       // Create threads and make them run transactions concurrently.
       Thread threadId[] = new Thread[NUM_THREADS];
       for (int i = 0; i < NUM_THREADS; i++) {
@@ -349,6 +355,7 @@ public class TestEditLog {
       // If there were any corruptions, it is likely that the reading in
       // of these transactions will throw an exception.
       //
+      namesystem.resetLastInodeIdWithoutChecking(originalLastInodeId);
       for (Iterator<StorageDirectory> it = 
               fsimage.getStorage().dirIterator(NameNodeDirType.EDITS); it.hasNext();) {
         FSEditLogLoader loader = new FSEditLogLoader(namesystem, 0);

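The reset matters because the worker threads consume inode ids (and write them into the edit log) without ever inserting inodes into the namespace, so replaying those segments against the already-advanced counter could fail the loader's id checks. Restated with hypothetical helper names:

    // Snapshot, run id-consuming transactions, rewind, then replay.
    long saved = namesystem.getLastInodeId();
    runConcurrentTransactionThreads();                  // hypothetical helper
    namesystem.resetLastInodeIdWithoutChecking(saved);  // rewind the allocator
    loadEditLogSegments();                              // hypothetical helper
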
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java?rev=1432796&r1=1432795&r2=1432796&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java Mon Jan 14 03:44:35 2013
@@ -73,7 +73,8 @@ public class TestFsLimits {
              fileAsURI(new File(MiniDFSCluster.getBaseDirectory(),
                                 "namenode")).toString());
 
-    rootInode = new INodeDirectoryWithQuota(INodeDirectory.ROOT_NAME, perms);
+    rootInode = new INodeDirectoryWithQuota(getMockNamesystem()
+        .allocateNewInodeId(), INodeDirectory.ROOT_NAME, perms);
     inodes = new INode[]{ rootInode, null };
     fs = null;
     fsIsReady = true;
@@ -152,7 +153,8 @@ public class TestFsLimits {
     // have to create after the caller has had a chance to set conf values
     if (fs == null) fs = new MockFSDirectory();
 
-    INode child = new INodeDirectory(name, perms);
+    INode child = new INodeDirectory(getMockNamesystem().allocateNewInodeId(),
+        name, perms);
     child.setLocalName(name);
     
     Class<?> generated = null;

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java?rev=1432796&r1=1432795&r2=1432796&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java Mon Jan 14 03:44:35 2013
@@ -26,14 +26,18 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathIsNotDirectoryException;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.junit.Test;
 
@@ -54,9 +58,9 @@ public class TestINodeFile {
   public void testReplication () {
     replication = 3;
     preferredBlockSize = 128*1024*1024;
-    INodeFile inf = new INodeFile(new PermissionStatus(userName, null, 
-                                  FsPermission.getDefault()), null, replication,
-                                  0L, 0L, preferredBlockSize);
+    INodeFile inf = new INodeFile(INodeId.GRANDFATHER_INODE_ID,
+        new PermissionStatus(userName, null, FsPermission.getDefault()), null,
+        replication, 0L, 0L, preferredBlockSize);
     assertEquals("True has to be returned in this case", replication,
                  inf.getFileReplication());
   }
@@ -71,9 +75,9 @@ public class TestINodeFile {
               throws IllegalArgumentException {
     replication = -1;
     preferredBlockSize = 128*1024*1024;
-    new INodeFile(new PermissionStatus(userName, null,
-                                  FsPermission.getDefault()), null, replication,
-                                  0L, 0L, preferredBlockSize);
+    new INodeFile(INodeId.GRANDFATHER_INODE_ID, new PermissionStatus(userName,
+        null, FsPermission.getDefault()), null, replication, 0L, 0L,
+        preferredBlockSize);
   }
 
   /**
@@ -84,20 +88,20 @@ public class TestINodeFile {
   public void testPreferredBlockSize () {
     replication = 3;
     preferredBlockSize = 128*1024*1024;
-    INodeFile inf = new INodeFile(new PermissionStatus(userName, null,
-                                  FsPermission.getDefault()), null, replication,
-                                  0L, 0L, preferredBlockSize);
-    assertEquals("True has to be returned in this case", preferredBlockSize,
-           inf.getPreferredBlockSize());
-  }
+    INodeFile inf = new INodeFile(INodeId.GRANDFATHER_INODE_ID,
+        new PermissionStatus(userName, null, FsPermission.getDefault()), null,
+        replication, 0L, 0L, preferredBlockSize);
+   assertEquals("True has to be returned in this case", preferredBlockSize,
+        inf.getPreferredBlockSize());
+ }
 
   @Test
   public void testPreferredBlockSizeUpperBound () {
     replication = 3;
     preferredBlockSize = BLKSIZE_MAXVALUE;
-    INodeFile inf = new INodeFile(new PermissionStatus(userName, null, 
-                                  FsPermission.getDefault()), null, replication,
-                                  0L, 0L, preferredBlockSize);
+    INodeFile inf = new INodeFile(INodeId.GRANDFATHER_INODE_ID,
+        new PermissionStatus(userName, null, FsPermission.getDefault()), null,
+        replication, 0L, 0L, preferredBlockSize);
     assertEquals("True has to be returned in this case", BLKSIZE_MAXVALUE,
                  inf.getPreferredBlockSize());
   }
@@ -112,9 +116,9 @@ public class TestINodeFile {
               throws IllegalArgumentException {
     replication = 3;
     preferredBlockSize = -1;
-    new INodeFile(new PermissionStatus(userName, null, 
-                                  FsPermission.getDefault()), null, replication,
-                                  0L, 0L, preferredBlockSize);
+    new INodeFile(INodeId.GRANDFATHER_INODE_ID, new PermissionStatus(userName,
+        null, FsPermission.getDefault()), null, replication, 0L, 0L,
+        preferredBlockSize);
   } 
 
   /**
@@ -127,10 +131,10 @@ public class TestINodeFile {
               throws IllegalArgumentException {
     replication = 3;
     preferredBlockSize = BLKSIZE_MAXVALUE+1;
-    new INodeFile(new PermissionStatus(userName, null, 
-                                  FsPermission.getDefault()), null, replication,
-                                  0L, 0L, preferredBlockSize);
-  }
+    new INodeFile(INodeId.GRANDFATHER_INODE_ID, new PermissionStatus(userName,
+        null, FsPermission.getDefault()), null, replication, 0L, 0L,
+        preferredBlockSize);
+  }
 
   @Test
   public void testGetFullPathName() {
@@ -139,12 +143,14 @@ public class TestINodeFile {
 
     replication = 3;
     preferredBlockSize = 128*1024*1024;
-    INodeFile inf = new INodeFile(perms, null, replication,
-                                  0L, 0L, preferredBlockSize);
+    INodeFile inf = new INodeFile(INodeId.GRANDFATHER_INODE_ID, perms, null,
+        replication, 0L, 0L, preferredBlockSize);
     inf.setLocalName("f");
 
-    INodeDirectory root = new INodeDirectory(INodeDirectory.ROOT_NAME, perms);
-    INodeDirectory dir = new INodeDirectory("d", perms);
+    INodeDirectory root = new INodeDirectory(INodeId.GRANDFATHER_INODE_ID,
+        INodeDirectory.ROOT_NAME, perms);
+    INodeDirectory dir = new INodeDirectory(INodeId.GRANDFATHER_INODE_ID, "d",
+        perms);
 
     assertEquals("f", inf.getFullPathName());
     assertEquals("", inf.getLocalParentDir());
@@ -242,7 +248,7 @@ public class TestINodeFile {
     for (int i = 0; i < nCount; i++) {
       PermissionStatus perms = new PermissionStatus(userName, null,
           FsPermission.getDefault());
-      iNodes[i] = new INodeFile(perms, null, replication, 0L, 0L,
+      iNodes[i] = new INodeFile(i, perms, null, replication, 0L, 0L,
           preferredBlockSize);
       iNodes[i].setLocalName(fileNamePrefix +  Integer.toString(i));
       BlockInfo newblock = new BlockInfo(replication);
@@ -293,10 +299,10 @@ public class TestINodeFile {
     }
 
     {//cast from INodeFile
-      final INode from = new INodeFile(
-          perm, null, replication, 0L, 0L, preferredBlockSize);
-      
-      //cast to INodeFile, should success
+      final INode from = new INodeFile(INodeId.GRANDFATHER_INODE_ID, perm,
+          null, replication, 0L, 0L, preferredBlockSize);
+
+      //cast to INodeFile, should succeed
       final INodeFile f = INodeFile.valueOf(from, path);
       assertTrue(f == from);
 
@@ -318,8 +324,9 @@ public class TestINodeFile {
 
     {//cast from INodeFileUnderConstruction
       final INode from = new INodeFileUnderConstruction(
-          perm, replication, 0L, 0L, "client", "machine", null);
-      
+          INodeId.GRANDFATHER_INODE_ID, perm, replication, 0L, 0L, "client",
+          "machine", null);
+
       //cast to INodeFile, should success
       final INodeFile f = INodeFile.valueOf(from, path);
       assertTrue(f == from);
@@ -338,7 +345,8 @@ public class TestINodeFile {
     }
 
     {//cast from INodeDirectory
-      final INode from = new INodeDirectory(perm, 0L);
+      final INode from = new INodeDirectory(INodeId.GRANDFATHER_INODE_ID, perm,
+          0L);
 
       //cast to INodeFile, should fail
       try {
@@ -361,4 +369,47 @@ public class TestINodeFile {
       assertTrue(d == from);
     }
   }
+
+  /**
+   * Verify that root always has inode id 1001 and a newly formatted fsimage
+   * has last allocated inode id 1000; validate that lastInodeId is persisted.
+   * @throws IOException
+   */
+  @Test
+  public void testInodeId() throws IOException {
+
+    Configuration conf = new Configuration();
+    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
+        DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
+        .build();
+    cluster.waitActive();
+    
+    FSNamesystem fsn = cluster.getNamesystem();
+    long lastId = fsn.getLastInodeId();
+
+    assertTrue(lastId == 1001);
+
+    // Create one directory; the last inode id should increase to 1002
+    FileSystem fs = cluster.getFileSystem();
+    Path path = new Path("/test1");
+    assertTrue(fs.mkdirs(path));
+    assertTrue(fsn.getLastInodeId() == 1002);
+
+    Path filePath = new Path("/test1/file");
+    fs.create(filePath).close();
+    assertTrue(fsn.getLastInodeId() == 1003);
+
+    // Rename doesn't increase inode id
+    Path renamedPath = new Path("/test2");
+    fs.rename(path, renamedPath);
+    assertTrue(fsn.getLastInodeId() == 1003);
+
+    cluster.restartNameNode();
+    cluster.waitActive();
+    // Make sure empty editlog can be handled
+    cluster.restartNameNode();
+    cluster.waitActive();
+    assertTrue(fsn.getLastInodeId() == 1003);
+  }
 }

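The constants the test asserts imply a numbering scheme: ids up to 1000 are reserved, the root directory receives 1001 at format time, each create or mkdir allocates the next id, and rename allocates none. A worked trace under those assumptions:

    // After format:                         lastInodeId == 1001 (root)
    fs.mkdirs(new Path("/test1"));          // /test1      -> 1002
    fs.create(new Path("/test1/file"));     // /test1/file -> 1003
    fs.rename(path, renamedPath);           // no allocation, still 1003
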
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathComponents.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathComponents.java?rev=1432796&r1=1432795&r2=1432796&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathComponents.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathComponents.java Mon Jan 14 03:44:35 2013
@@ -25,6 +25,8 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.junit.Test;
 
+import com.google.common.base.Charsets;
+
 
 /**
  * 
@@ -45,7 +47,7 @@ public class TestPathComponents {
     String pathString = str;
     byte[][] oldPathComponents = INode.getPathComponents(pathString);
     byte[][] newPathComponents = 
-                DFSUtil.bytes2byteArray(pathString.getBytes("UTF-8"),
+                DFSUtil.bytes2byteArray(pathString.getBytes(Charsets.UTF_8),
                                         (byte) Path.SEPARATOR_CHAR);
     if (oldPathComponents[0] == null) {
       assertTrue(oldPathComponents[0] == newPathComponents[0]);


